From 0ffc09c8365391eaa6ecb56554ab188af1332d58 Mon Sep 17 00:00:00 2001 From: Greg Burd Date: Wed, 2 Jul 2025 07:17:55 -0400 Subject: [PATCH 1/7] dev setup --- .cirrus.tasks.yml | 2 +- .clang-format | 71 ++ .envrc | 18 + .gitignore | 8 + .idea/.gitignore | 8 + .idea/editor.xml | 580 +++++++++++++++ .idea/inspectionProfiles/Project_Default.xml | 7 + .idea/misc.xml | 18 + .idea/prettier.xml | 6 + .idea/vcs.xml | 6 + .vscode/launch.json | 22 + .vscode/settings.json | 5 + flake.lock | 61 ++ flake.nix | 33 + pg-aliases.sh | 293 ++++++++ shell.nix | 738 +++++++++++++++++++ src/test/regress/pg_regress.c | 2 +- 17 files changed, 1876 insertions(+), 2 deletions(-) create mode 100644 .clang-format create mode 100644 .envrc create mode 100644 .idea/.gitignore create mode 100644 .idea/editor.xml create mode 100644 .idea/inspectionProfiles/Project_Default.xml create mode 100644 .idea/misc.xml create mode 100644 .idea/prettier.xml create mode 100644 .idea/vcs.xml create mode 100644 .vscode/launch.json create mode 100644 .vscode/settings.json create mode 100644 flake.lock create mode 100644 flake.nix create mode 100644 pg-aliases.sh create mode 100644 shell.nix diff --git a/.cirrus.tasks.yml b/.cirrus.tasks.yml index 1a366975d824f..e00fd7a7631b0 100644 --- a/.cirrus.tasks.yml +++ b/.cirrus.tasks.yml @@ -26,7 +26,7 @@ env: # Build test dependencies as part of the build step, to see compiler # errors/warnings in one place. 
MBUILD_TARGET: all testprep - MTEST_ARGS: --print-errorlogs --no-rebuild -C build + MTEST_ARGS: --print-errorlogs --no-rebuild --maxfail=3 -C build PGCTLTIMEOUT: 120 # avoids spurious failures during parallel tests TEMP_CONFIG: ${CIRRUS_WORKING_DIR}/src/tools/ci/pg_ci_base.conf PG_TEST_EXTRA: kerberos ldap ssl libpq_encryption load_balance oauth diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000000000..2f786ac8eef05 --- /dev/null +++ b/.clang-format @@ -0,0 +1,71 @@ +# the official .clang-format style for https://github.com/taocpp +# +# clang-format-4.0 -i -style=file $(find -name '[^.]*.[hc]pp') + +Language: Cpp +Standard: Cpp11 + +AccessModifierOffset: -3 +AlignAfterOpenBracket: Align +AlignConsecutiveAssignments: false +AlignConsecutiveDeclarations: false +AlignEscapedNewlinesLeft: false +AlignOperands: true +AlignTrailingComments: true +AllowAllParametersOfDeclarationOnNextLine: true +AllowShortBlocksOnASingleLine: false +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: Empty +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: false +AlwaysBreakTemplateDeclarations: true +BinPackArguments: false +BinPackParameters: false +BraceWrapping: + AfterClass: true + AfterControlStatement: false + AfterEnum : true + AfterFunction : true + AfterNamespace : true + AfterStruct : true + AfterUnion : true + BeforeCatch : true + BeforeElse : true + IndentBraces : false +BreakBeforeBinaryOperators: All +BreakBeforeBraces: Custom +BreakBeforeTernaryOperators: false +BreakStringLiterals: false +BreakConstructorInitializersBeforeComma: false +ColumnLimit: 0 +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 3 +ContinuationIndentWidth: 3 +Cpp11BracedListStyle: false +DerivePointerAlignment: false +DisableFormat: false +ExperimentalAutoDetectBinPacking: false +IndentCaseLabels: true 
+IndentWidth: 3 +IndentWrappedFunctionNames: false +KeepEmptyLinesAtTheStartOfBlocks: true +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: All +PointerAlignment: Left +ReflowComments: false +SortIncludes: true +SpaceAfterCStyleCast: false +SpaceAfterTemplateKeyword: false +SpaceBeforeAssignmentOperators: true +SpaceBeforeParens: Never +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 2 +SpacesInAngles: true +SpacesInCStyleCastParentheses: false +SpacesInContainerLiterals: true +SpacesInParentheses: true +SpacesInSquareBrackets: true +TabWidth: 8 +UseTab: Never diff --git a/.envrc b/.envrc new file mode 100644 index 0000000000000..f5b49c32db284 --- /dev/null +++ b/.envrc @@ -0,0 +1,18 @@ +# Bootstrap nix-direnv (ensures specific version, robust) +if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" +fi + +# Watch flake.nix for changes to reload the environment +watch_file flake.nix + +# Use the Nix flake to provide the development environment +use flake + +# Set consistent locale for development environment +#export LANGUAGE=C.utf8 +#export LANG=C.utf8 +#export LC_ALL=C.utf8 + +# IKOS-specific environment variable (keep if needed for your IKOS workflow) +export IKOS_SCAN_NOTIFIER_FILES="" diff --git a/.gitignore b/.gitignore index 4e911395fe3ba..8e429d66ca41f 100644 --- a/.gitignore +++ b/.gitignore @@ -43,3 +43,11 @@ lib*.pc /Release/ /tmp_install/ /portlock/ + +build/ +install/ +test-db/ +.direnv/ +.cache/ +.history + diff --git a/.idea/.gitignore b/.idea/.gitignore new file mode 100644 index 0000000000000..13566b81b018a --- /dev/null +++ b/.idea/.gitignore @@ -0,0 +1,8 @@ +# Default ignored files +/shelf/ +/workspace.xml +# Editor-based HTTP Client requests +/httpRequests/ +# Datasource local storage ignored files +/dataSources/ +/dataSources.local.xml diff --git a/.idea/editor.xml b/.idea/editor.xml 
new file mode 100644 index 0000000000000..1f0ef49b4faf4 --- /dev/null +++ b/.idea/editor.xml @@ -0,0 +1,580 @@ + + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml new file mode 100644 index 0000000000000..9c69411050eac --- /dev/null +++ b/.idea/inspectionProfiles/Project_Default.xml @@ -0,0 +1,7 @@ + + + + \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml new file mode 100644 index 0000000000000..53624c9e1f9ab --- /dev/null +++ b/.idea/misc.xml @@ -0,0 +1,18 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/prettier.xml b/.idea/prettier.xml new file mode 100644 index 0000000000000..b0c1c68fbbad6 --- /dev/null +++ b/.idea/prettier.xml @@ -0,0 +1,6 @@ + + + + + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml new file mode 100644 index 0000000000000..35eb1ddfbbc02 --- /dev/null +++ b/.idea/vcs.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 0000000000000..f5d97424c5047 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,22 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. 
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "(gdb) Attach Postgres", + "type": "cppdbg", + "request": "attach", + "program": "${workspaceRoot}/install/bin/postgres", + "MIMode": "gdb", + "setupCommands": [ + { + "description": "Enable pretty-printing for gdb", + "text": "-enable-pretty-printing", + "ignoreFailures": true + } + ], + } + ] +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000000000..cc8a64fa9fa85 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,5 @@ +{ + "files.associations": { + "syscache.h": "c" + } +} \ No newline at end of file diff --git a/flake.lock b/flake.lock new file mode 100644 index 0000000000000..41bcb32b86834 --- /dev/null +++ b/flake.lock @@ -0,0 +1,61 @@ +{ + "nodes": { + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1731533236, + "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1751211869, + "narHash": "sha256-1Cu92i1KSPbhPCKxoiVG5qnoRiKTgR5CcGSRyLpOd7Y=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "b43c397f6c213918d6cfe6e3550abfe79b5d1c51", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-25.05", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "flake-utils": "flake-utils", + "nixpkgs": "nixpkgs" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": 
"nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 0000000000000..0b8a8eb086293 --- /dev/null +++ b/flake.nix @@ -0,0 +1,33 @@ +{ + description = "PostgreSQL development environment"; + + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05"; + flake-utils.url = "github:numtide/flake-utils"; + }; + + outputs = { self, nixpkgs, flake-utils }: + flake-utils.lib.eachDefaultSystem (system: + let + pkgs = import nixpkgs { + inherit system; + config.allowUnfree = true; + }; + + shellConfig = import ./shell.nix { inherit pkgs system; }; + + in { + devShells = { + default = shellConfig.devShell; + gcc = shellConfig.devShell; + clang = shellConfig.clangDevShell; + gcc-musl = shellConfig.muslDevShell; + clang-musl = shellConfig.clangMuslDevShell; + }; + + packages = { + inherit (shellConfig) gdbConfig flameGraphScript pgbenchScript; + }; + } + ); +} diff --git a/pg-aliases.sh b/pg-aliases.sh new file mode 100644 index 0000000000000..6f6b58f4d9dad --- /dev/null +++ b/pg-aliases.sh @@ -0,0 +1,293 @@ +# PostgreSQL Development Aliases + +# Build system management +pg_clean_for_compiler() { + local current_compiler="$(basename $CC)" + local build_dir="$PG_BUILD_DIR" + + if [ -f "$build_dir/compile_commands.json" ]; then + local last_compiler=$(grep -o '/[^/]*/bin/[gc]cc\|/[^/]*/bin/clang' "$build_dir/compile_commands.json" | head -1 | xargs basename 2>/dev/null || echo "unknown") + + if [ "$last_compiler" != "$current_compiler" ] && [ "$last_compiler" != "unknown" ]; then + echo "Detected compiler change from $last_compiler to $current_compiler" + echo "Cleaning build directory..." 
+ rm -rf "$build_dir" + mkdir -p "$build_dir" + fi + fi + + mkdir -p "$build_dir" + echo "$current_compiler" > "$build_dir/.compiler_used" +} + +# Core PostgreSQL commands +alias pg-setup=' + if [ -z "$PERL_CORE_DIR" ]; then + echo "Error: Could not find perl CORE directory" >&2 + return 1 + fi + + pg_clean_for_compiler + + echo "=== PostgreSQL Build Configuration ===" + echo "Compiler: $CC" + echo "LLVM: $(llvm-config --version 2>/dev/null || echo 'disabled')" + echo "Source: $PG_SOURCE_DIR" + echo "Build: $PG_BUILD_DIR" + echo "Install: $PG_INSTALL_DIR" + echo "======================================" + # --fatal-meson-warnings + + env CFLAGS="-I$PERL_CORE_DIR $CFLAGS" \ + LDFLAGS="-L$PERL_CORE_DIR -lperl $LDFLAGS" \ + meson setup --reconfigure \ + -Doptimization=g \ + -Ddebug=true \ + -Db_sanitize=none \ + -Db_lundef=false \ + -Dlz4=enabled \ + -Dzstd=enabled \ + -Dllvm=disabled \ + -Dplperl=enabled \ + -Dplpython=enabled \ + -Dpltcl=enabled \ + -Dlibxml=enabled \ + -Duuid=e2fs \ + -Dlibxslt=enabled \ + -Dssl=openssl \ + -Dldap=disabled \ + -Dcassert=true \ + -Dtap_tests=enabled \ + -Dinjection_points=true \ + -Ddocs_pdf=enabled \ + -Ddocs_html_style=website \ + --prefix="$PG_INSTALL_DIR" \ + "$PG_BUILD_DIR" \ + "$PG_SOURCE_DIR"' + +alias pg-build='meson compile -C "$PG_BUILD_DIR"' +alias pg-install='meson install -C "$PG_BUILD_DIR"' +alias pg-test='meson test -q --print-errorlogs -C "$PG_BUILD_DIR"' +# Clean commands +alias pg-clean='ninja -C "$PG_BUILD_DIR" clean' +alias pg-full-clean='rm -rf "$PG_BUILD_DIR" "$PG_INSTALL_DIR" && echo "Build and install directories cleaned"' + +# Database management +alias pg-init='rm -rf "$PG_DATA_DIR" && "$PG_INSTALL_DIR/bin/initdb" --debug --no-clean "$PG_DATA_DIR"' +alias pg-start='"$PG_INSTALL_DIR/bin/postgres" -D "$PG_DATA_DIR" -k "$PG_DATA_DIR"' +alias pg-stop='pkill -f "postgres.*-D.*$PG_DATA_DIR" || true' +alias pg-restart='pg-stop && sleep 2 && pg-start' +alias pg-status='pgrep -f "postgres.*-D.*$PG_DATA_DIR" && echo 
"PostgreSQL is running" || echo "PostgreSQL is not running"' + +# Client connections +alias pg-psql='"$PG_INSTALL_DIR/bin/psql" -h "$PG_DATA_DIR" postgres' +alias pg-createdb='"$PG_INSTALL_DIR/bin/createdb" -h "$PG_DATA_DIR"' +alias pg-dropdb='"$PG_INSTALL_DIR/bin/dropdb" -h "$PG_DATA_DIR"' + +# Debugging +alias pg-debug-gdb='gdb -x "$GDBINIT" "$PG_INSTALL_DIR/bin/postgres"' +alias pg-debug-lldb='lldb "$PG_INSTALL_DIR/bin/postgres"' +alias pg-debug=' + if command -v gdb >/dev/null 2>&1; then + pg-debug-gdb + elif command -v lldb >/dev/null 2>&1; then + pg-debug-lldb + else + echo "No debugger available (gdb or lldb required)" + fi' + +# Attach to running process +alias pg-attach-gdb=' + PG_PID=$(pgrep -f "postgres.*-D.*$PG_DATA_DIR" | head -1) + if [ -n "$PG_PID" ]; then + echo "Attaching GDB to PostgreSQL process $PG_PID" + gdb -x "$GDBINIT" -p "$PG_PID" + else + echo "No PostgreSQL process found" + fi' + +alias pg-attach-lldb=' + PG_PID=$(pgrep -f "postgres.*-D.*$PG_DATA_DIR" | head -1) + if [ -n "$PG_PID" ]; then + echo "Attaching LLDB to PostgreSQL process $PG_PID" + lldb -p "$PG_PID" + else + echo "No PostgreSQL process found" + fi' + +alias pg-attach=' + if command -v gdb >/dev/null 2>&1; then + pg-attach-gdb + elif command -v lldb >/dev/null 2>&1; then + pg-attach-lldb + else + echo "No debugger available (gdb or lldb required)" + fi' + +# Performance profiling and analysis +alias pg-valgrind='valgrind --tool=memcheck --leak-check=full --show-leak-kinds=all "$PG_INSTALL_DIR/bin/postgres" -D "$PG_DATA_DIR"' +alias pg-strace='strace -f -o /tmp/postgres.strace "$PG_INSTALL_DIR/bin/postgres" -D "$PG_DATA_DIR"' + +# Flame graph generation +alias pg-flame='pg-flame-generate' +alias pg-flame-30='pg-flame-generate 30' +alias pg-flame-60='pg-flame-generate 60' +alias pg-flame-120='pg-flame-generate 120' + +# Custom flame graph with specific duration and output +pg-flame-custom() { + local duration=${1:-30} + local output_dir=${2:-$PG_FLAME_DIR} + echo "Generating 
flame graph for ${duration}s, output to: $output_dir" + pg-flame-generate "$duration" "$output_dir" +} + +# Benchmarking with pgbench +alias pg-bench='pg-bench-run' +alias pg-bench-quick='pg-bench-run 5 1 100 1 30 select-only' +alias pg-bench-standard='pg-bench-run 10 2 1000 10 60 tpcb-like' +alias pg-bench-heavy='pg-bench-run 50 4 5000 100 300 tpcb-like' +alias pg-bench-readonly='pg-bench-run 20 4 2000 50 120 select-only' + +# Custom benchmark function +pg-bench-custom() { + local clients=${1:-10} + local threads=${2:-2} + local transactions=${3:-1000} + local scale=${4:-10} + local duration=${5:-60} + local test_type=${6:-tpcb-like} + + echo "Running custom benchmark:" + echo " Clients: $clients, Threads: $threads" + echo " Transactions: $transactions, Scale: $scale" + echo " Duration: ${duration}s, Type: $test_type" + + pg-bench-run "$clients" "$threads" "$transactions" "$scale" "$duration" "$test_type" +} + +# Benchmark with flame graph +pg-bench-flame() { + local duration=${1:-60} + local clients=${2:-10} + local scale=${3:-10} + + echo "Running benchmark with flame graph generation" + echo "Duration: ${duration}s, Clients: $clients, Scale: $scale" + + # Start benchmark in background + pg-bench-run "$clients" 2 1000 "$scale" "$duration" tpcb-like & + local bench_pid=$! + + # Wait a bit for benchmark to start + sleep 5 + + # Generate flame graph for most of the benchmark duration + local flame_duration=$((duration - 10)) + if [ $flame_duration -gt 10 ]; then + pg-flame-generate "$flame_duration" & + local flame_pid=$! 
+ fi + + # Wait for benchmark to complete + wait $bench_pid + + # Wait for flame graph if it was started + if [ -n "${flame_pid:-}" ]; then + wait $flame_pid + fi + + echo "Benchmark and flame graph generation completed" +} + +# Performance monitoring +alias pg-perf='perf top -p $(pgrep -f "postgres.*-D.*$PG_DATA_DIR" | head -1)' +alias pg-htop='htop -p $(pgrep -f "postgres.*-D.*$PG_DATA_DIR" | tr "\n" "," | sed "s/,$//")' + +# System performance stats during PostgreSQL operation +pg-stats() { + local duration=${1:-30} + echo "Collecting system stats for ${duration}s..." + + iostat -x 1 "$duration" > "$PG_BENCH_DIR/iostat_$(date +%Y%m%d_%H%M%S).log" & + vmstat 1 "$duration" > "$PG_BENCH_DIR/vmstat_$(date +%Y%m%d_%H%M%S).log" & + + wait + echo "System stats saved to $PG_BENCH_DIR" +} + +# Log management +alias pg-log='tail -f "$PG_DATA_DIR/log/postgresql-$(date +%Y-%m-%d).log" 2>/dev/null || echo "No log file found"' +alias pg-log-errors='grep -i error "$PG_DATA_DIR/log/"*.log 2>/dev/null || echo "No error logs found"' + +# Build logs +alias pg-build-log='cat "$PG_BUILD_DIR/meson-logs/meson-log.txt"' +alias pg-build-errors='grep -i error "$PG_BUILD_DIR/meson-logs/meson-log.txt" 2>/dev/null || echo "No build errors found"' + +# Development helpers +alias pg-format=' + if [ ! 
-f "$PG_SOURCE_DIR/src/tools/pgindent/pgindent" ]; then + echo "Error: pgindent not found at $PG_SOURCE_DIR/src/tools/pgindent/pgindent" + return 1 + fi + + modified_files=$(git diff --name-only HEAD | egrep "\.c$|\.h$") + + if [ -z "$modified_files" ]; then + echo "No modified .c or .h files found" + return 0 + fi + + echo "Formatting modified files with pgindent:" + for file in $modified_files; do + if [ -f "$file" ]; then + echo " Formatting: $file" + "$PG_SOURCE_DIR/src/tools/pgindent/pgindent" "$file" + else + echo " Warning: File not found: $file" + fi + done + + echo "Formatting completed"' + +alias pg-tidy='find "$PG_SOURCE_DIR" -name "*.c" | head -10 | xargs clang-tidy' + +# Results viewing +alias pg-bench-results='ls -la "$PG_BENCH_DIR" && echo "Latest results:" && tail -20 "$PG_BENCH_DIR"/results_*.txt 2>/dev/null | tail -20' +alias pg-flame-results='ls -la "$PG_FLAME_DIR" && echo "Open flame graphs with: firefox $PG_FLAME_DIR/*.svg"' + +# Clean up old results +pg-clean-results() { + local days=${1:-7} + echo "Cleaning benchmark and flame graph results older than $days days..." 
+ find "$PG_BENCH_DIR" -type f -mtime +$days -delete 2>/dev/null || true + find "$PG_FLAME_DIR" -type f -mtime +$days -delete 2>/dev/null || true + echo "Cleanup completed" +} + +# Information +alias pg-info=' + echo "=== PostgreSQL Development Environment ===" + echo "Source: $PG_SOURCE_DIR" + echo "Build: $PG_BUILD_DIR" + echo "Install: $PG_INSTALL_DIR" + echo "Data: $PG_DATA_DIR" + echo "Benchmarks: $PG_BENCH_DIR" + echo "Flame graphs: $PG_FLAME_DIR" + echo "Compiler: $CC" + echo "" + echo "Available commands:" + echo " Setup: pg-setup, pg-build, pg-install" + echo " Database: pg-init, pg-start, pg-stop, pg-psql" + echo " Debug: pg-debug, pg-attach, pg-valgrind" + echo " Performance: pg-flame, pg-bench, pg-perf" + echo " Benchmarks: pg-bench-quick, pg-bench-standard, pg-bench-heavy" + echo " Flame graphs: pg-flame-30, pg-flame-60, pg-flame-custom" + echo " Combined: pg-bench-flame" + echo " Results: pg-bench-results, pg-flame-results" + echo " Logs: pg-log, pg-build-log" + echo " Clean: pg-clean, pg-full-clean, pg-clean-results" + echo " Code quality: pg-format, pg-tidy" + echo "=========================================="' + +echo "PostgreSQL aliases loaded. Run 'pg-info' for available commands." 
diff --git a/shell.nix b/shell.nix new file mode 100644 index 0000000000000..94dc9d3d749a8 --- /dev/null +++ b/shell.nix @@ -0,0 +1,738 @@ +{ pkgs, system }: + +let + # Use LLVM 19 for modern PostgreSQL development + llvmPkgs = pkgs.llvmPackages_19; + + # Configuration constants + config = { + pgSourceDir = "$HOME/ws/postgresql"; + pgBuildDir = "$HOME/ws/postgresql/build"; + pgInstallDir = "$HOME/ws/postgresql/install"; + pgDataDir = "/tmp/test-db"; + pgBenchDir = "/tmp/pgbench-results"; + pgFlameDir = "/tmp/flame-graphs"; + }; + + # Single dependency function that can be used for all environments + getPostgreSQLDeps = muslLibs: with pkgs; [ + # Build system (always use host tools) + meson ninja pkg-config autoconf libtool git which + binutils gnumake + + # Parser/lexer tools + bison flex + + # Perl with required packages + (perl.withPackages (ps: with ps; [ IPCRun ])) + + # Documentation + docbook_xml_dtd_45 docbook-xsl-nons libxslt libxml2 fop + + # Development tools (always use host tools) + coreutils shellcheck ripgrep valgrind curl + gdb lldb strace ltrace + perf-tools linuxPackages.perf flamegraph + htop iotop sysstat + ccache clang-tools cppcheck + + # LLVM toolchain + llvmPkgs.llvm llvmPkgs.llvm.dev + + # Language support + (python3.withPackages (ps: with ps; [ requests browser-cookie3 ])) + tcl + ] ++ (if muslLibs then [ + # Musl target libraries for cross-compilation + pkgs.pkgsMusl.readline + pkgs.pkgsMusl.zlib + pkgs.pkgsMusl.openssl + pkgs.pkgsMusl.icu + pkgs.pkgsMusl.lz4 + pkgs.pkgsMusl.zstd + pkgs.pkgsMusl.libuuid + pkgs.pkgsMusl.libkrb5 + pkgs.pkgsMusl.linux-pam + pkgs.pkgsMusl.libxcrypt + ] else [ + # Glibc target libraries + readline zlib openssl icu lz4 zstd libuuid libkrb5 + linux-pam libxcrypt numactl openldap + liburing libselinux + glibc glibc.dev + ]); + + # GDB configuration for PostgreSQL debugging + gdbConfig = pkgs.writeText "gdbinit-postgres" '' + # PostgreSQL-specific GDB configuration + + # Pretty-print PostgreSQL data structures + 
define print_node + if $arg0 + printf "Node type: %s\n", nodeTagNames[$arg0->type] + print *$arg0 + else + printf "NULL node\n" + end + end + document print_node + Print a PostgreSQL Node with type information + Usage: print_node + end + + define print_list + set $list = (List*)$arg0 + if $list + printf "List length: %d\n", $list->length + set $cell = $list->head + set $i = 0 + while $cell && $i < $list->length + printf " [%d]: ", $i + print_node $cell->data.ptr_value + set $cell = $cell->next + set $i = $i + 1 + end + else + printf "NULL list\n" + end + end + document print_list + Print a PostgreSQL List structure + Usage: print_list + end + + define print_query + set $query = (Query*)$arg0 + if $query + printf "Query type: %d, command type: %d\n", $query->querySource, $query->commandType + print *$query + else + printf "NULL query\n" + end + end + document print_query + Print a PostgreSQL Query structure + Usage: print_query + end + + define print_relcache + set $rel = (Relation)$arg0 + if $rel + printf "Relation: %s.%s (OID: %u)\n", $rel->rd_rel->relnamespace, $rel->rd_rel->relname.data, $rel->rd_id + printf " natts: %d, relkind: %c\n", $rel->rd_rel->relnatts, $rel->rd_rel->relkind + else + printf "NULL relation\n" + end + end + document print_relcache + Print relation cache entry information + Usage: print_relcache + end + + define print_tupdesc + set $desc = (TupleDesc)$arg0 + if $desc + printf "TupleDesc: %d attributes\n", $desc->natts + set $i = 0 + while $i < $desc->natts + set $attr = $desc->attrs[$i] + printf " [%d]: %s (type: %u, len: %d)\n", $i, $attr->attname.data, $attr->atttypid, $attr->attlen + set $i = $i + 1 + end + else + printf "NULL tuple descriptor\n" + end + end + document print_tupdesc + Print tuple descriptor information + Usage: print_tupdesc + end + + define print_slot + set $slot = (TupleTableSlot*)$arg0 + if $slot + printf "TupleTableSlot: %s\n", $slot->tts_ops->name + printf " empty: %d, shouldFree: %d\n", $slot->tts_empty, 
$slot->tts_shouldFree + if $slot->tts_tupleDescriptor + print_tupdesc $slot->tts_tupleDescriptor + end + else + printf "NULL slot\n" + end + end + document print_slot + Print tuple table slot information + Usage: print_slot + end + + # Memory context debugging + define print_mcxt + set $context = (MemoryContext)$arg0 + if $context + printf "MemoryContext: %s\n", $context->name + printf " type: %s, parent: %p\n", $context->methods->name, $context->parent + printf " total: %zu, free: %zu\n", $context->mem_allocated, $context->freep - $context->freeptr + else + printf "NULL memory context\n" + end + end + document print_mcxt + Print memory context information + Usage: print_mcxt + end + + # Process debugging + define print_proc + set $proc = (PGPROC*)$arg0 + if $proc + printf "PGPROC: pid=%d, database=%u\n", $proc->pid, $proc->databaseId + printf " waiting: %d, waitStatus: %d\n", $proc->waiting, $proc->waitStatus + else + printf "NULL process\n" + end + end + document print_proc + Print process information + Usage: print_proc + end + + # Set useful defaults + set print pretty on + set print object on + set print static-members off + set print vtbl on + set print demangle on + set demangle-style gnu-v3 + set print sevenbit-strings off + set history save on + set history size 1000 + set history filename ~/.gdb_history_postgres + + # Common breakpoints for PostgreSQL debugging + define pg_break_common + break elog + break errfinish + break ExceptionalCondition + break ProcessInterrupts + end + document pg_break_common + Set common PostgreSQL debugging breakpoints + end + + printf "PostgreSQL GDB configuration loaded.\n" + printf "Available commands: print_node, print_list, print_query, print_relcache,\n" + printf " print_tupdesc, print_slot, print_mcxt, print_proc, pg_break_common\n" + ''; + + # Flame graph generation script + flameGraphScript = pkgs.writeScriptBin "pg-flame-generate" '' + #!${pkgs.bash}/bin/bash + set -euo pipefail + + DURATION=''${1:-30} + 
OUTPUT_DIR=''${2:-${config.pgFlameDir}} + TIMESTAMP=$(date +%Y%m%d_%H%M%S) + + mkdir -p "$OUTPUT_DIR" + + echo "Generating flame graph for PostgreSQL (duration: ''${DURATION}s)" + + # Find PostgreSQL processes + PG_PIDS=$(pgrep -f "postgres.*-D.*${config.pgDataDir}" || true) + + if [ -z "$PG_PIDS" ]; then + echo "Error: No PostgreSQL processes found" + exit 1 + fi + + echo "Found PostgreSQL processes: $PG_PIDS" + + # Record perf data + PERF_DATA="$OUTPUT_DIR/perf_$TIMESTAMP.data" + echo "Recording perf data to $PERF_DATA" + + ${pkgs.linuxPackages.perf}/bin/perf record \ + -F 997 \ + -g \ + --call-graph dwarf \ + -p "$(echo $PG_PIDS | tr ' ' ',')" \ + -o "$PERF_DATA" \ + sleep "$DURATION" + + # Generate flame graph + FLAME_SVG="$OUTPUT_DIR/postgres_flame_$TIMESTAMP.svg" + echo "Generating flame graph: $FLAME_SVG" + + ${pkgs.linuxPackages.perf}/bin/perf script -i "$PERF_DATA" | \ + ${pkgs.flamegraph}/bin/stackcollapse-perf.pl | \ + ${pkgs.flamegraph}/bin/flamegraph.pl \ + --title "PostgreSQL Flame Graph ($TIMESTAMP)" \ + --width 1200 \ + --height 800 \ + > "$FLAME_SVG" + + echo "Flame graph generated: $FLAME_SVG" + echo "Perf data saved: $PERF_DATA" + + # Generate summary report + REPORT="$OUTPUT_DIR/report_$TIMESTAMP.txt" + echo "Generating performance report: $REPORT" + + { + echo "PostgreSQL Performance Analysis Report" + echo "Generated: $(date)" + echo "Duration: ''${DURATION}s" + echo "Processes: $PG_PIDS" + echo "" + echo "=== Top Functions ===" + ${pkgs.linuxPackages.perf}/bin/perf report -i "$PERF_DATA" --stdio --sort comm,dso,symbol | head -50 + echo "" + echo "=== Call Graph ===" + ${pkgs.linuxPackages.perf}/bin/perf report -i "$PERF_DATA" --stdio -g --sort comm,dso,symbol | head -100 + } > "$REPORT" + + echo "Report generated: $REPORT" + echo "" + echo "Files created:" + echo " Flame graph: $FLAME_SVG" + echo " Perf data: $PERF_DATA" + echo " Report: $REPORT" + ''; + + # pgbench wrapper script + pgbenchScript = pkgs.writeScriptBin "pg-bench-run" '' + 
#!${pkgs.bash}/bin/bash + set -euo pipefail + + # Default parameters + CLIENTS=''${1:-10} + THREADS=''${2:-2} + TRANSACTIONS=''${3:-1000} + SCALE=''${4:-10} + DURATION=''${5:-60} + TEST_TYPE=''${6:-tpcb-like} + + OUTPUT_DIR="${config.pgBenchDir}" + TIMESTAMP=$(date +%Y%m%d_%H%M%S) + + mkdir -p "$OUTPUT_DIR" + + echo "=== PostgreSQL Benchmark Configuration ===" + echo "Clients: $CLIENTS" + echo "Threads: $THREADS" + echo "Transactions: $TRANSACTIONS" + echo "Scale factor: $SCALE" + echo "Duration: ''${DURATION}s" + echo "Test type: $TEST_TYPE" + echo "Output directory: $OUTPUT_DIR" + echo "============================================" + + # Check if PostgreSQL is running + if ! pgrep -f "postgres.*-D.*${config.pgDataDir}" >/dev/null; then + echo "Error: PostgreSQL is not running. Start it with 'pg-start'" + exit 1 + fi + + PGBENCH="${config.pgInstallDir}/bin/pgbench" + PSQL="${config.pgInstallDir}/bin/psql" + CREATEDB="${config.pgInstallDir}/bin/createdb" + DROPDB="${config.pgInstallDir}/bin/dropdb" + + DB_NAME="pgbench_test_$TIMESTAMP" + RESULTS_FILE="$OUTPUT_DIR/results_$TIMESTAMP.txt" + LOG_FILE="$OUTPUT_DIR/pgbench_$TIMESTAMP.log" + + echo "Creating test database: $DB_NAME" + "$CREATEDB" -h "${config.pgDataDir}" "$DB_NAME" || { + echo "Failed to create database" + exit 1 + } + + # Initialize pgbench tables + echo "Initializing pgbench tables (scale factor: $SCALE)" + "$PGBENCH" -h "${config.pgDataDir}" -i -s "$SCALE" "$DB_NAME" || { + echo "Failed to initialize pgbench tables" + "$DROPDB" -h "${config.pgDataDir}" "$DB_NAME" 2>/dev/null || true + exit 1 + } + + # Run benchmark based on test type + echo "Running benchmark..." 
+ + case "$TEST_TYPE" in + "tpcb-like"|"default") + BENCH_ARGS="" + ;; + "select-only") + BENCH_ARGS="-S" + ;; + "simple-update") + BENCH_ARGS="-N" + ;; + "read-write") + BENCH_ARGS="-b select-only@70 -b tpcb-like@30" + ;; + *) + echo "Unknown test type: $TEST_TYPE" + echo "Available types: tpcb-like, select-only, simple-update, read-write" + "$DROPDB" -h "${config.pgDataDir}" "$DB_NAME" 2>/dev/null || true + exit 1 + ;; + esac + + { + echo "PostgreSQL Benchmark Results" + echo "Generated: $(date)" + echo "Test type: $TEST_TYPE" + echo "Clients: $CLIENTS, Threads: $THREADS" + echo "Transactions: $TRANSACTIONS, Duration: ''${DURATION}s" + echo "Scale factor: $SCALE" + echo "Database: $DB_NAME" + echo "" + echo "=== System Information ===" + echo "CPU: $(nproc) cores" + echo "Memory: $(free -h | grep '^Mem:' | awk '{print $2}')" + echo "Compiler: $CC" + echo "PostgreSQL version: $("$PSQL" --no-psqlrc -h "${config.pgDataDir}" -d "$DB_NAME" -t -c "SELECT version();" | head -1)" + echo "" + echo "=== Benchmark Results ===" + } > "$RESULTS_FILE" + + # Run the actual benchmark + "$PGBENCH" \ + -h "${config.pgDataDir}" \ + -c "$CLIENTS" \ + -j "$THREADS" \ + -T "$DURATION" \ + -P 5 \ + --log \ + --log-prefix="$OUTPUT_DIR/pgbench_$TIMESTAMP" \ + $BENCH_ARGS \ + "$DB_NAME" 2>&1 | tee -a "$RESULTS_FILE" + + # Collect additional statistics + { + echo "" + echo "=== Database Statistics ===" + "$PSQL" --no-psqlrc -h "${config.pgDataDir}" -d "$DB_NAME" -c " + SELECT + schemaname, + relname, + n_tup_ins as inserts, + n_tup_upd as updates, + n_tup_del as deletes, + n_live_tup as live_tuples, + n_dead_tup as dead_tuples + FROM pg_stat_user_tables; + " + + echo "" + echo "=== Index Statistics ===" + "$PSQL" --no-psqlrc -h "${config.pgDataDir}" -d "$DB_NAME" -c " + SELECT + schemaname, + relname, + indexrelname, + idx_scan, + idx_tup_read, + idx_tup_fetch + FROM pg_stat_user_indexes; + " + } >> "$RESULTS_FILE" + + # Clean up + echo "Cleaning up test database: $DB_NAME" + "$DROPDB" -h 
"${config.pgDataDir}" "$DB_NAME" 2>/dev/null || true + + echo "" + echo "Benchmark completed!" + echo "Results saved to: $RESULTS_FILE" + echo "Transaction logs: $OUTPUT_DIR/pgbench_$TIMESTAMP*" + + # Show summary + echo "" + echo "=== Quick Summary ===" + grep -E "(tps|latency)" "$RESULTS_FILE" | tail -5 + ''; + + # Development shell (GCC + glibc) + devShell = pkgs.mkShell { + name = "postgresql-dev"; + buildInputs = (getPostgreSQLDeps false) ++ [ + flameGraphScript + pgbenchScript + ]; + + shellHook = '' + # History configuration + export HISTFILE=.history + export HISTSIZE=1000000 + export HISTFILESIZE=1000000 + + # Clean environment + unset LD_LIBRARY_PATH LD_PRELOAD LIBRARY_PATH C_INCLUDE_PATH CPLUS_INCLUDE_PATH + + # Essential tools in PATH + export PATH="${pkgs.which}/bin:${pkgs.coreutils}/bin:$PATH" + + # Ccache configuration + export PATH=${pkgs.ccache}/bin:$PATH + export CCACHE_COMPILERCHECK=content + export CCACHE_DIR=$HOME/.ccache_pg_dev + mkdir -p "$CCACHE_DIR" + + # LLVM configuration + export LLVM_CONFIG="${llvmPkgs.llvm}/bin/llvm-config" + export PATH="${llvmPkgs.llvm}/bin:$PATH" + export PKG_CONFIG_PATH="${llvmPkgs.llvm.dev}/lib/pkgconfig:$PKG_CONFIG_PATH" + export LLVM_DIR="${llvmPkgs.llvm.dev}/lib/cmake/llvm" + export LLVM_ROOT="${llvmPkgs.llvm}" + + # Development tools in PATH + export PATH=${pkgs.clang-tools}/bin:$PATH + export PATH=${pkgs.cppcheck}/bin:$PATH + + # Development CFLAGS + # -DRELCACHE_FORCE_RELEASE -DCATCACHE_FORCE_RELEASE -fno-omit-frame-pointer -fno-stack-protector -DUSE_VALGRIND + export CFLAGS="" + export CXXFLAGS="" + + # GCC configuration (default compiler) + export CC="${pkgs.gcc}/bin/gcc" + export CXX="${pkgs.gcc}/bin/g++" + + # PostgreSQL environment + export PG_SOURCE_DIR="${config.pgSourceDir}" + export PG_BUILD_DIR="${config.pgBuildDir}" + export PG_INSTALL_DIR="${config.pgInstallDir}" + export PG_DATA_DIR="${config.pgDataDir}" + export PG_BENCH_DIR="${config.pgBenchDir}" + export PG_FLAME_DIR="${config.pgFlameDir}" + 
export PERL_CORE_DIR=$(find ${pkgs.perl} -maxdepth 5 -path "*/CORE" -type d) + + # GDB configuration + export GDBINIT="${gdbConfig}" + + # Performance tools in PATH + export PATH="${flameGraphScript}/bin:${pgbenchScript}/bin:$PATH" + + # Create output directories + mkdir -p "$PG_BENCH_DIR" "$PG_FLAME_DIR" + + # Compiler verification + echo "Environment configured:" + echo " Compiler: $CC" + echo " LibC: glibc" + echo " LLVM: $(llvm-config --version 2>/dev/null || echo 'not available')" + echo " Ccache: enabled ($CCACHE_DIR)" + + # Load PostgreSQL development aliases + if [ -f ./pg-aliases.sh ]; then + source ./pg-aliases.sh + else + echo "Warning: pg-aliases.sh not found in current directory" + fi + + echo "" + echo "PostgreSQL Development Environment Ready (GCC + glibc)" + echo "Run 'pg-info' for available commands" + ''; + }; + + # Clang + glibc variant + clangDevShell = pkgs.mkShell { + name = "postgresql-clang-glibc"; + buildInputs = (getPostgreSQLDeps false) ++ [ + llvmPkgs.clang + llvmPkgs.lld + llvmPkgs.compiler-rt + flameGraphScript + pgbenchScript + ]; + + shellHook = '' + export HISTFILE=.history + export HISTSIZE=1000000 + export HISTFILESIZE=1000000 + + unset LD_LIBRARY_PATH LD_PRELOAD LIBRARY_PATH C_INCLUDE_PATH CPLUS_INCLUDE_PATH + + export PATH="${pkgs.which}/bin:${pkgs.coreutils}/bin:$PATH" + + # Ccache configuration + export PATH=${pkgs.ccache}/bin:$PATH + export CCACHE_COMPILERCHECK=content + export CCACHE_DIR=$HOME/.ccache_pg_dev_clang + mkdir -p "$CCACHE_DIR" + + # LLVM configuration + export LLVM_CONFIG="${llvmPkgs.llvm}/bin/llvm-config" + export PATH="${llvmPkgs.llvm}/bin:$PATH" + export PKG_CONFIG_PATH="${llvmPkgs.llvm.dev}/lib/pkgconfig:$PKG_CONFIG_PATH" + export LLVM_DIR="${llvmPkgs.llvm.dev}/lib/cmake/llvm" + export LLVM_ROOT="${llvmPkgs.llvm}" + + # Development tools in PATH + export PATH=${pkgs.clang-tools}/bin:$PATH + export PATH=${pkgs.cppcheck}/bin:$PATH + + # Clang + glibc configuration - use system linker instead of LLD for 
compatibility + export CC="${llvmPkgs.clang}/bin/clang" + export CXX="${llvmPkgs.clang}/bin/clang++" + + # Use system linker and standard runtime + #export CFLAGS="" + #export CXXFLAGS="" + #export LDFLAGS="" + + # PostgreSQL environment + export PG_SOURCE_DIR="${config.pgSourceDir}" + export PG_BUILD_DIR="${config.pgBuildDir}-clang" + export PG_INSTALL_DIR="${config.pgInstallDir}-clang" + export PG_DATA_DIR="${config.pgDataDir}-clang" + export PG_BENCH_DIR="${config.pgBenchDir}" + export PG_FLAME_DIR="${config.pgFlameDir}" + export PERL_CORE_DIR=$(find ${pkgs.perl} -maxdepth 5 -path "*/CORE" -type d) + + # GDB configuration + export GDBINIT="${gdbConfig}" + + # Performance tools in PATH + export PATH="${flameGraphScript}/bin:${pgbenchScript}/bin:$PATH" + + # Create output directories + mkdir -p "$PG_BENCH_DIR" "$PG_FLAME_DIR" + + # Compiler verification + echo "Environment configured:" + echo " Compiler: $CC" + echo " LibC: glibc" + echo " LLVM: $(llvm-config --version 2>/dev/null || echo 'not available')" + echo " Ccache: enabled ($CCACHE_DIR)" + + # Load PostgreSQL development aliases + if [ -f ./pg-aliases.sh ]; then + source ./pg-aliases.sh + else + echo "Warning: pg-aliases.sh not found in current directory" + fi + + echo "" + echo "PostgreSQL Development Environment Ready (Clang + glibc)" + echo "Run 'pg-info' for available commands" + ''; + }; + + # GCC + musl variant (cross-compilation) + muslDevShell = pkgs.mkShell { + name = "postgresql-gcc-musl"; + buildInputs = (getPostgreSQLDeps true) ++ [ + pkgs.gcc + flameGraphScript + pgbenchScript + ]; + + shellHook = '' + # Same base configuration as main shell + export HISTFILE=.history + export HISTSIZE=1000000 + export HISTFILESIZE=1000000 + + unset LD_LIBRARY_PATH LD_PRELOAD LIBRARY_PATH C_INCLUDE_PATH CPLUS_INCLUDE_PATH + + export PATH="${pkgs.which}/bin:${pkgs.coreutils}/bin:$PATH" + + # Cross-compilation to musl + export CC="${pkgs.gcc}/bin/gcc" + export CXX="${pkgs.gcc}/bin/g++" + + # Point to musl 
libraries for linking + export PKG_CONFIG_PATH="${pkgs.pkgsMusl.openssl.dev}/lib/pkgconfig:${pkgs.pkgsMusl.zlib.dev}/lib/pkgconfig:${pkgs.pkgsMusl.icu.dev}/lib/pkgconfig" + export CFLAGS="-ggdb -Og -fno-omit-frame-pointer -DUSE_VALGRIND -D_FORTIFY_SOURCE=1 -I${pkgs.pkgsMusl.stdenv.cc.libc}/include" + export CXXFLAGS="-ggdb -Og -fno-omit-frame-pointer -DUSE_VALGRIND -D_FORTIFY_SOURCE=1 -I${pkgs.pkgsMusl.stdenv.cc.libc}/include" + export LDFLAGS="-L${pkgs.pkgsMusl.stdenv.cc.libc}/lib -static-libgcc" + + # PostgreSQL environment + export PG_SOURCE_DIR="${config.pgSourceDir}" + export PG_BUILD_DIR="${config.pgBuildDir}-musl" + export PG_INSTALL_DIR="${config.pgInstallDir}-musl" + export PG_DATA_DIR="${config.pgDataDir}-musl" + export PG_BENCH_DIR="${config.pgBenchDir}" + export PG_FLAME_DIR="${config.pgFlameDir}" + export PERL_CORE_DIR=$(find ${pkgs.perl} -maxdepth 5 -path "*/CORE" -type d) + + export GDBINIT="${gdbConfig}" + export PATH="${flameGraphScript}/bin:${pgbenchScript}/bin:$PATH" + + mkdir -p "$PG_BENCH_DIR" "$PG_FLAME_DIR" + + echo "GCC + musl environment configured" + echo " Compiler: $CC" + echo " LibC: musl (cross-compilation)" + + if [ -f ./pg-aliases.sh ]; then + source ./pg-aliases.sh + fi + + echo "PostgreSQL Development Environment Ready (GCC + musl)" + ''; + }; + + # Clang + musl variant (cross-compilation) + clangMuslDevShell = pkgs.mkShell { + name = "postgresql-clang-musl"; + buildInputs = (getPostgreSQLDeps true) ++ [ + llvmPkgs.clang + llvmPkgs.lld + flameGraphScript + pgbenchScript + ]; + + shellHook = '' + export HISTFILE=.history + export HISTSIZE=1000000 + export HISTFILESIZE=1000000 + + unset LD_LIBRARY_PATH LD_PRELOAD LIBRARY_PATH C_INCLUDE_PATH CPLUS_INCLUDE_PATH + + export PATH="${pkgs.which}/bin:${pkgs.coreutils}/bin:$PATH" + + # Cross-compilation to musl with clang + export CC="${llvmPkgs.clang}/bin/clang" + export CXX="${llvmPkgs.clang}/bin/clang++" + + # Point to musl libraries for linking + export 
PKG_CONFIG_PATH="${pkgs.pkgsMusl.openssl.dev}/lib/pkgconfig:${pkgs.pkgsMusl.zlib.dev}/lib/pkgconfig:${pkgs.pkgsMusl.icu.dev}/lib/pkgconfig" + export CFLAGS="--target=x86_64-linux-musl -ggdb -Og -fno-omit-frame-pointer -DUSE_VALGRIND -D_FORTIFY_SOURCE=1 -I${pkgs.pkgsMusl.stdenv.cc.libc}/include" + export CXXFLAGS="--target=x86_64-linux-musl -ggdb -Og -fno-omit-frame-pointer -DUSE_VALGRIND -D_FORTIFY_SOURCE=1 -I${pkgs.pkgsMusl.stdenv.cc.libc}/include" + export LDFLAGS="--target=x86_64-linux-musl -L${pkgs.pkgsMusl.stdenv.cc.libc}/lib -fuse-ld=lld" + + # PostgreSQL environment + export PG_SOURCE_DIR="${config.pgSourceDir}" + export PG_BUILD_DIR="${config.pgBuildDir}-clang-musl" + export PG_INSTALL_DIR="${config.pgInstallDir}-clang-musl" + export PG_DATA_DIR="${config.pgDataDir}-clang-musl" + export PG_BENCH_DIR="${config.pgBenchDir}" + export PG_FLAME_DIR="${config.pgFlameDir}" + export PERL_CORE_DIR=$(find ${pkgs.perl} -maxdepth 5 -path "*/CORE" -type d) + + export GDBINIT="${gdbConfig}" + export PATH="${flameGraphScript}/bin:${pgbenchScript}/bin:$PATH" + + mkdir -p "$PG_BENCH_DIR" "$PG_FLAME_DIR" + + echo "Clang + musl environment configured" + echo " Compiler: $CC" + echo " LibC: musl (cross-compilation)" + + if [ -f ./pg-aliases.sh ]; then + source ./pg-aliases.sh + fi + + echo "PostgreSQL Development Environment Ready (Clang + musl)" + ''; + }; + +in { + inherit devShell clangDevShell muslDevShell clangMuslDevShell gdbConfig flameGraphScript pgbenchScript; +} diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c index 5d85dcc62f0a5..12333b5b9a5f7 100644 --- a/src/test/regress/pg_regress.c +++ b/src/test/regress/pg_regress.c @@ -1232,7 +1232,7 @@ spawn_process(const char *cmdline) char *cmdline2; cmdline2 = psprintf("exec %s", cmdline); - execl(shellprog, shellprog, "-c", cmdline2, (char *) NULL); + execlp(shellprog, shellprog, "-c", cmdline2, (char *) NULL); /* Not using the normal bail() here as we want _exit */ bail_noatexit("could not exec 
\"%s\": %m", shellprog); } From 79efe284e875d9befe804f6fa8c211fc1ff48b03 Mon Sep 17 00:00:00 2001 From: Greg Burd Date: Tue, 22 Jul 2025 10:23:10 -0400 Subject: [PATCH 2/7] win github workflows --- .github/workflows/all.yml | 46 +++++ .github/workflows/bundle-deps.yml | 196 +++++++++++++++++++++ .github/workflows/diffutils.yml | 40 +++++ .github/workflows/gettext.yml | 91 ++++++++++ .github/workflows/icu.yml | 90 ++++++++++ .github/workflows/krb5.yml | 81 +++++++++ .github/workflows/libiconv.yml | 69 ++++++++ .github/workflows/libxml2.yml | 65 +++++++ .github/workflows/libxslt.yml | 76 +++++++++ .github/workflows/lz4.yml | 77 +++++++++ .github/workflows/manifest.yml | 102 +++++++++++ .github/workflows/meson.yml | 41 +++++ .github/workflows/ninja.yml | 37 ++++ .github/workflows/openssl.yml | 77 +++++++++ .github/workflows/ossp-uuid.yml | 95 +++++++++++ .github/workflows/pkgconf.yml | 88 ++++++++++ .github/workflows/postgresql-dev.yml | 169 +++++++++++++++++++ .github/workflows/postgresql.yml | 243 +++++++++++++++++++++++++++ .github/workflows/winflexbison.yml | 40 +++++ .github/workflows/zlib.yml | 82 +++++++++ .github/workflows/zstd.yml | 85 ++++++++++ manifest.json | 112 ++++++++++++ 22 files changed, 2002 insertions(+) create mode 100644 .github/workflows/all.yml create mode 100644 .github/workflows/bundle-deps.yml create mode 100644 .github/workflows/diffutils.yml create mode 100644 .github/workflows/gettext.yml create mode 100644 .github/workflows/icu.yml create mode 100644 .github/workflows/krb5.yml create mode 100644 .github/workflows/libiconv.yml create mode 100644 .github/workflows/libxml2.yml create mode 100644 .github/workflows/libxslt.yml create mode 100644 .github/workflows/lz4.yml create mode 100644 .github/workflows/manifest.yml create mode 100644 .github/workflows/meson.yml create mode 100644 .github/workflows/ninja.yml create mode 100644 .github/workflows/openssl.yml create mode 100644 .github/workflows/ossp-uuid.yml create mode 100644 
.github/workflows/pkgconf.yml create mode 100644 .github/workflows/postgresql-dev.yml create mode 100644 .github/workflows/postgresql.yml create mode 100644 .github/workflows/winflexbison.yml create mode 100644 .github/workflows/zlib.yml create mode 100644 .github/workflows/zstd.yml create mode 100644 manifest.json diff --git a/.github/workflows/all.yml b/.github/workflows/all.yml new file mode 100644 index 0000000000000..3713bf701ccbe --- /dev/null +++ b/.github/workflows/all.yml @@ -0,0 +1,46 @@ +name: Build all (WIP) + +on: + workflow_dispatch: + +env: + GITHUB_TOKEN: ${{ secrets.ACTION_API_PAT }} + +jobs: + + # Github can only have nesting of up to 20 actions in a single action + # run. We'll need way more than that, as we have actions to get the + # version, setup the compiler, upload builds and far more. So, we need + # to manually call other actions using the API to build everything. + package-tools: + runs-on: ubuntu-latest + steps: + - name: package-diffutils + run: | + curl -s -X POST -H "Authorization: Bearer ${{ env.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" -d '{"ref":"main"}' \ + https://api.github.com/repos/${GITHUB_REPOSITORY}/actions/workflows/diffutils.yml/dispatches + + - name: package-meson + run: | + curl -s -X POST -H "Authorization: Bearer ${{ env.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" -d '{"ref":"main"}' \ + https://api.github.com/repos/${GITHUB_REPOSITORY}/actions/workflows/meson.yml/dispatches + + - name: package-ninja + run: | + curl -s -X POST -H "Authorization: Bearer ${{ env.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" -d '{"ref":"main"}' \ + https://api.github.com/repos/${GITHUB_REPOSITORY}/actions/workflows/ninja.yml/dispatches + + - name: package-pkgconf + run: | + curl -s -X POST -H "Authorization: Bearer ${{ env.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" -d '{"ref":"main"}' \ + 
https://api.github.com/repos/${GITHUB_REPOSITORY}/actions/workflows/pkgconf.yml/dispatches + + - name: package-winflexbison + run: | + curl -s -X POST -H "Authorization: Bearer ${{ env.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" -d '{"ref":"main"}' \ + https://api.github.com/repos/${GITHUB_REPOSITORY}/actions/workflows/winflexbison.yml/dispatches diff --git a/.github/workflows/bundle-deps.yml b/.github/workflows/bundle-deps.yml new file mode 100644 index 0000000000000..ae0050c0e6c41 --- /dev/null +++ b/.github/workflows/bundle-deps.yml @@ -0,0 +1,196 @@ +name: Bundle dependencies + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 04 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + build-all-deps: + needs: get-versions + env: + DIFFUTILS_VERSION: ${{needs.get-versions.outputs.DIFFUTILS_VERSION}} + GETTEXT_VERSION: ${{needs.get-versions.outputs.GETTEXT_VERSION}} + ICU_VERSION: ${{needs.get-versions.outputs.ICU_VERSION}} + KRB5_VERSION: ${{needs.get-versions.outputs.KRB5_VERSION}} + LIBICONV_VERSION: ${{needs.get-versions.outputs.LIBICONV_VERSION}} + LIBXML2_VERSION: ${{needs.get-versions.outputs.LIBXML2_VERSION}} + LIBXSLT_VERSION: ${{needs.get-versions.outputs.LIBXSLT_VERSION}} + LZ4_VERSION: ${{needs.get-versions.outputs.LZ4_VERSION}} + MESON_VERSION: ${{needs.get-versions.outputs.MESON_VERSION}} + NINJA_VERSION: ${{needs.get-versions.outputs.NINJA_VERSION}} + OPENSSL_VERSION: ${{needs.get-versions.outputs.OPENSSL_VERSION}} + OSSP-UUID_VERSION: ${{needs.get-versions.outputs.OSSP-UUID_VERSION}} + PKGCONF_VERSION: ${{needs.get-versions.outputs.PKGCONF_VERSION}} + WINFLEXBISON_VERSION: ${{needs.get-versions.outputs.WINFLEXBISON_VERSION}} + ZLIB_VERSION: ${{needs.get-versions.outputs.ZLIB_VERSION}} + ZSTD_VERSION: ${{needs.get-versions.outputs.ZSTD_VERSION}} + + + runs-on: windows-latest + steps: + - name: Download diffutils + uses: 
dawidd6/action-download-artifact@v3 + with: + workflow: diffutils.yml + workflow_conclusion: success + name: diffutils-${{ env.DIFFUTILS_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + - name: Download gettext + uses: dawidd6/action-download-artifact@v3 + with: + workflow: gettext.yml + workflow_conclusion: success + name: gettext-${{ env.GETTEXT_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + - name: Download icu + uses: dawidd6/action-download-artifact@v3 + with: + workflow: icu.yml + workflow_conclusion: success + name: icu-${{ env.ICU_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + - name: Download krb5 + uses: dawidd6/action-download-artifact@v3 + with: + workflow: krb5.yml + workflow_conclusion: success + name: krb5-${{ env.KRB5_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + - name: Download libiconv + uses: dawidd6/action-download-artifact@v3 + with: + workflow: libiconv.yml + workflow_conclusion: success + name: libiconv-${{ env.LIBICONV_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + - name: Download libxml2 + uses: dawidd6/action-download-artifact@v3 + with: + workflow: libxml2.yml + workflow_conclusion: success + name: libxml2-${{ env.LIBXML2_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + - name: Download libxslt + uses: dawidd6/action-download-artifact@v3 + with: + workflow: libxslt.yml + workflow_conclusion: success + name: libxslt-${{ env.LIBXSLT_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + - name: Download lz4 + uses: dawidd6/action-download-artifact@v3 + with: + workflow: lz4.yml + workflow_conclusion: success + name: lz4-${{ env.LZ4_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + - name: Download meson + uses: dawidd6/action-download-artifact@v3 + with: + workflow: meson.yml + workflow_conclusion: success + name: meson-${{ env.MESON_VERSION }}-win64 + path: /builddeps + 
if_no_artifact_found: fail + + - name: Download ninja + uses: dawidd6/action-download-artifact@v3 + with: + workflow: ninja.yml + workflow_conclusion: success + name: ninja-${{ env.NINJA_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + - name: Download openssl + uses: dawidd6/action-download-artifact@v3 + with: + workflow: openssl.yml + workflow_conclusion: success + name: openssl-${{ env.OPENSSL_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + - name: Download ossp-uuid + uses: dawidd6/action-download-artifact@v3 + with: + workflow: ossp-uuid.yml + workflow_conclusion: success + name: ossp-uuid-${{ env.OSSP-UUID_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + - name: Download pkgconf + uses: dawidd6/action-download-artifact@v3 + with: + workflow: pkgconf.yml + workflow_conclusion: success + name: pkgconf-${{ env.PKGCONF_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + - name: Download winflexbison + uses: dawidd6/action-download-artifact@v3 + with: + workflow: winflexbison.yml + workflow_conclusion: success + name: winflexbison-${{ env.WINFLEXBISON_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + - name: Download zlib + uses: dawidd6/action-download-artifact@v3 + with: + workflow: zlib.yml + workflow_conclusion: success + name: zlib-${{ env.ZLIB_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + - name: Download zstd + uses: dawidd6/action-download-artifact@v3 + with: + workflow: zstd.yml + workflow_conclusion: success + name: zstd-${{ env.ZSTD_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + # github errors out due to too many files :( + - name: Remove docs + run: | + rm.exe -rf ` + \builddeps\share\docs ` + \builddeps\mesonbuild\docs ` + "\builddeps\mesonbuild\test cases" ` + \builddeps\html + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: all-deps-win64 + path: 
/builddeps diff --git a/.github/workflows/diffutils.yml b/.github/workflows/diffutils.yml new file mode 100644 index 0000000000000..a4aa660436b97 --- /dev/null +++ b/.github/workflows/diffutils.yml @@ -0,0 +1,40 @@ +name: Package diffutils + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 00 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + package-diffutils: + runs-on: windows-latest + needs: get-versions + env: + DIFFUTILS_VERSION: ${{needs.get-versions.outputs.DIFFUTILS_VERSION}} + + steps: + - name: Download + run: | + curl -L -o diffutils.zip "https://downloads.sourceforge.net/project/gnuwin32/diffutils/${{ env.DIFFUTILS_VERSION }}/diffutils-${{ env.DIFFUTILS_VERSION }}-bin.zip" + mkdir diffutils + cd diffutils + unzip ../diffutils.zip + + - name: Download dependencies + run: | + curl -L -o diffutils-deps.zip "https://downloads.sourceforge.net/project/gnuwin32/diffutils/${{ env.DIFFUTILS_VERSION }}/diffutils-${{ env.DIFFUTILS_VERSION }}-dep.zip" + cd diffutils + unzip ../diffutils-deps.zip + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: diffutils-${{ env.DIFFUTILS_VERSION }}-win64 + path: diffutils diff --git a/.github/workflows/gettext.yml b/.github/workflows/gettext.yml new file mode 100644 index 0000000000000..b6d14ab67f408 --- /dev/null +++ b/.github/workflows/gettext.yml @@ -0,0 +1,91 @@ +name: Build gettext + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 02 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + build-gettext: + runs-on: windows-latest + needs: get-versions + defaults: + run: + shell: msys2 {0} + env: + LIBICONV_VERSION: ${{needs.get-versions.outputs.LIBICONV_VERSION}} + GETTEXT_VERSION: ${{needs.get-versions.outputs.GETTEXT_VERSION}} + steps: + - uses: msys2/setup-msys2@v2 + with: + msystem: UCRT64 + update: true + 
install: mingw-w64-ucrt-x86_64-gcc automake autoconf pkg-config make zip patch tar + + - name: Download libiconv + uses: dawidd6/action-download-artifact@v3 + with: + workflow: libiconv.yml + workflow_conclusion: success + name: libiconv-${{ env.LIBICONV_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + - name: Download + run: | + curl https://ftp.gnu.org/pub/gnu/gettext/gettext-${{ env.GETTEXT_VERSION }}.tar.gz -o ./gettext-${{ env.GETTEXT_VERSION }}.tar.gz + tar zxvf gettext-${{ env.GETTEXT_VERSION }}.tar.gz + + - name: Patch + run: | + curl https://raw.githubusercontent.com/vslavik/gettext-tools-windows/refs/heads/master/patches/gettext-0.23-pthread_sigmask.patch -o gettext-0.23-pthread_sigmask.patch + cd gettext-${{ env.GETTEXT_VERSION }} + patch -p1 < ../gettext-0.23-pthread_sigmask.patch + + - name: Configure + run: | + cd gettext-${{ env.GETTEXT_VERSION }} + ./configure CFLAGS="$CFLAGS -Wno-error=incompatible-pointer-types -Wno-error=implicit-function-declaration" \ + --prefix=$(pwd)/winpgbuild \ + --disable-static \ + --disable-dependency-tracking \ + --enable-silent-rules \ + --with-libiconv-prefix=/builddeps \ + --disable-rpath \ + --enable-nls \ + --disable-csharp \ + --disable-java \ + --enable-threads=windows \ + --enable-relocatable + + - name: Build + run: | + cd gettext-${{ env.GETTEXT_VERSION }} + make all + + - name: Install + run: | + cd gettext-${{ env.GETTEXT_VERSION }} + make install + + cp winpgbuild/lib/libintl.dll.a winpgbuild/lib/libintl.lib + cp winpgbuild/lib/libasprintf.dll.a winpgbuild/lib/libasprintf.lib + + - name: Upload Source + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: gettext-${{ env.GETTEXT_VERSION }}-src + path: gettext-${{ env.GETTEXT_VERSION }}.tar.gz + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: gettext-${{ env.GETTEXT_VERSION }}-win64 + path: gettext-${{ env.GETTEXT_VERSION }}/winpgbuild diff --git 
a/.github/workflows/icu.yml b/.github/workflows/icu.yml new file mode 100644 index 0000000000000..9aaaf7ad6d65c --- /dev/null +++ b/.github/workflows/icu.yml @@ -0,0 +1,90 @@ +name: Build ICU + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 01 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + build-icu: + runs-on: windows-latest + needs: get-versions + env: + ICU_VERSION: ${{needs.get-versions.outputs.ICU_VERSION}} + steps: + - uses: ilammy/msvc-dev-cmd@v1 + + - name: Download + run: | + curl -L https://github.com/unicode-org/icu/releases/download/release-$(echo ${{ env.ICU_VERSION }} | sed 's/\./-/g')/icu4c-$(echo ${{ env.ICU_VERSION }} | sed 's/\./_/g')-src.tgz -o ./icu4c-${{ env.ICU_VERSION }}.tar.gz + tar zxvf ./icu4c-${{ env.ICU_VERSION }}.tar.gz + shell: bash + + - name: Build + run: | + cd icu + devenv.com source\allinone\allinone.sln /build "Release|x64" + + - name: Install + run: | + cd icu + + mkdir \icu + + mkdir \icu\bin64 + copy bin64\* \icu\bin64\ + + mkdir \icu\include + mkdir \icu\include/unicode + copy include\unicode\* \icu\include\unicode\/ + + mkdir \icu\lib64 + copy lib64\* \icu\lib64\ + + - name: Create pkgconfig Files + run: | + mkdir \icu\lib + mkdir \icu\lib\pkgconfig + + >> \icu\lib\pkgconfig\icu-uc.pc echo prefix=${pcfiledir}/../.. + >> \icu\lib\pkgconfig\icu-uc.pc echo exec_prefix=${prefix} + >> \icu\lib\pkgconfig\icu-uc.pc echo libdir=${prefix}/lib64 + >> \icu\lib\pkgconfig\icu-uc.pc echo includedir=${prefix}/include + >> \icu\lib\pkgconfig\icu-uc.pc echo. 
+ >> \icu\lib\pkgconfig\icu-uc.pc echo Name: icu-uc + >> \icu\lib\pkgconfig\icu-uc.pc echo Description: International Components for Unicode: Common and Data libraries + >> \icu\lib\pkgconfig\icu-uc.pc echo Version: ${{ env.ICU_VERSION }} + >> \icu\lib\pkgconfig\icu-uc.pc echo Libs: -L${libdir} -licuuc -licudt + >> \icu\lib\pkgconfig\icu-uc.pc echo Cflags: -I${includedir} + + >> \icu\lib\pkgconfig\icu-i18n.pc echo prefix=${pcfiledir}/../.. + >> \icu\lib\pkgconfig\icu-i18n.pc echo exec_prefix=${prefix} + >> \icu\lib\pkgconfig\icu-i18n.pc echo libdir=${prefix}/lib64 + >> \icu\lib\pkgconfig\icu-i18n.pc echo includedir=${prefix}/include + >> \icu\lib\pkgconfig\icu-i18n.pc echo. + >> \icu\lib\pkgconfig\icu-i18n.pc echo Name: icu-i18n + >> \icu\lib\pkgconfig\icu-i18n.pc echo Description: International Components for Unicode: Internationalization library + >> \icu\lib\pkgconfig\icu-i18n.pc echo Version: ${{ env.ICU_VERSION }} + >> \icu\lib\pkgconfig\icu-i18n.pc echo Libs: -L${libdir} -licuin + >> \icu\lib\pkgconfig\icu-i18n.pc echo Cflags: -I${includedir} + shell: cmd + + - name: Upload Source + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: icu-${{ env.ICU_VERSION }}-src + path: icu4c-${{ env.ICU_VERSION }}.tar.gz + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: icu-${{ env.ICU_VERSION }}-win64 + path: /icu diff --git a/.github/workflows/krb5.yml b/.github/workflows/krb5.yml new file mode 100644 index 0000000000000..7324602551e4a --- /dev/null +++ b/.github/workflows/krb5.yml @@ -0,0 +1,81 @@ +name: Build KRB5 + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 01 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + build-krb5: + runs-on: windows-latest + needs: get-versions + env: + KRB5_VERSION: ${{needs.get-versions.outputs.KRB5_VERSION}} + + steps: + - uses: ilammy/msvc-dev-cmd@v1 + + - name: Download + 
run: | + curl https://kerberos.org/dist/krb5/1.21/krb5-${{ env.KRB5_VERSION }}.tar.gz -o ./krb5-${{ env.KRB5_VERSION }}.tar.gz + tar zxvf krb5-${{ env.KRB5_VERSION }}.tar.gz + + - name: Configure + run: | + cd krb5-${{ env.KRB5_VERSION }}\src + nmake -f Makefile.in prep-windows + + - name: Build + run: | + cd krb5-${{ env.KRB5_VERSION }}\src + $env:PATH += ";" + $env:WindowsSdkVerBinPath + "\x86" + nmake NODEBUG=1 + + - name: Install + # We're only doing a 64 bit build, so we need to tweak things at the + # end of the installation to make it look as it would if we had built + # both 32 and 64 bit together. + run: | + mkdir \krb5 + cd krb5-${{ env.KRB5_VERSION }}\src + $env:KRB_INSTALL_DIR = "\krb5" + nmake install NODEBUG=1 + + mkdir \krb5\lib\amd64 + move \krb5\lib\*.lib \krb5\lib\amd64\ + + - name: Create pkgconfig File + run: | + mkdir \krb5\lib\pkgconfig + + >> \krb5\lib\pkgconfig\krb5-gssapi.pc echo prefix=${pcfiledir}/../.. + >> \krb5\lib\pkgconfig\krb5-gssapi.pc echo exec_prefix=${prefix} + >> \krb5\lib\pkgconfig\krb5-gssapi.pc echo libdir=${prefix}/lib + >> \krb5\lib\pkgconfig\krb5-gssapi.pc echo includedir=${prefix}/include + >> \krb5\lib\pkgconfig\krb5-gssapi.pc echo. 
+ >> \krb5\lib\pkgconfig\krb5-gssapi.pc echo Name: krb5-gssapi + >> \krb5\lib\pkgconfig\krb5-gssapi.pc echo Version: ${{ env.KRB5_VERSION }} + >> \krb5\lib\pkgconfig\krb5-gssapi.pc echo Description: MIT kerberos library + >> \krb5\lib\pkgconfig\krb5-gssapi.pc echo Libs: -L${libdir}/amd64 -lkrb5_64 -lgssapi64 -lcomerr64 + >> \krb5\lib\pkgconfig\krb5-gssapi.pc echo Cflags: -I${includedir} -I${includedir}/krb5 + shell: cmd + + - name: Upload Source + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: krb5-${{ env.KRB5_VERSION }}-src + path: krb5-${{ env.KRB5_VERSION }}.tar.gz + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: krb5-${{ env.KRB5_VERSION }}-win64 + path: /krb5 diff --git a/.github/workflows/libiconv.yml b/.github/workflows/libiconv.yml new file mode 100644 index 0000000000000..55ecfac4ff55b --- /dev/null +++ b/.github/workflows/libiconv.yml @@ -0,0 +1,69 @@ +name: Build libiconv + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 01 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + build-libiconv: + runs-on: windows-latest + needs: get-versions + defaults: + run: + shell: msys2 {0} + + env: + LIBICONV_VERSION: ${{needs.get-versions.outputs.LIBICONV_VERSION}} + steps: + - uses: msys2/setup-msys2@v2 + with: + msystem: UCRT64 + update: true + install: mingw-w64-ucrt-x86_64-gcc automake autoconf pkg-config make zip patch tar + + - name: Download + run: | + curl https://ftp.gnu.org/pub/gnu/libiconv/libiconv-${{ env.LIBICONV_VERSION }}.tar.gz -o ./libiconv-${{ env.LIBICONV_VERSION }}.tar.gz + tar zxvf libiconv-${{ env.LIBICONV_VERSION }}.tar.gz + - name: Configure + run: | + cd libiconv-${{ env.LIBICONV_VERSION }} + ./configure --prefix=$(pwd)/winpgbuild \ + --disable-static \ + --disable-dependency-tracking \ + --disable-rpath \ + --disable-nls + + - name: Build + run: | + cd libiconv-${{ 
env.LIBICONV_VERSION }} + make all + + - name: Install + run: | + cd libiconv-${{ env.LIBICONV_VERSION }} + make install + + cp winpgbuild/lib/libiconv.dll.a winpgbuild/lib/iconv.lib + cp winpgbuild/lib/libcharset.dll.a winpgbuild/lib/charset.lib + + - name: Upload Source + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: libiconv-${{ env.LIBICONV_VERSION }}-src + path: libiconv-${{ env.LIBICONV_VERSION }}.tar.gz + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: libiconv-${{ env.LIBICONV_VERSION }}-win64 + path: libiconv-${{ env.LIBICONV_VERSION }}/winpgbuild diff --git a/.github/workflows/libxml2.yml b/.github/workflows/libxml2.yml new file mode 100644 index 0000000000000..49b52249a081d --- /dev/null +++ b/.github/workflows/libxml2.yml @@ -0,0 +1,65 @@ +name: Build libxml2 + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 02 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + build-libxml2: + runs-on: windows-latest + needs: get-versions + env: + ZLIB_VERSION: ${{needs.get-versions.outputs.ZLIB_VERSION}} + LIBXML2_VERSION: ${{needs.get-versions.outputs.LIBXML2_VERSION}} + steps: + - uses: ilammy/msvc-dev-cmd@v1 + + - name: Download zlib + uses: dawidd6/action-download-artifact@v3 + with: + workflow: zlib.yml + workflow_conclusion: success + name: zlib-${{ env.ZLIB_VERSION }}-win64 + path: /build/zlib + if_no_artifact_found: fail + + - name: Download + run: | + curl https://gitlab.gnome.org/GNOME/libxml2/-/archive/v${{ env.LIBXML2_VERSION }}/libxml2-v${{ env.LIBXML2_VERSION }}.tar.gz -o ./libxml2-v${{ env.LIBXML2_VERSION }}.tar.gz + tar zxvf libxml2-v${{ env.LIBXML2_VERSION }}.tar.gz + + - name: Configure + run: | + cd libxml2-v${{ env.LIBXML2_VERSION }} + cmake -B build -D CMAKE_INSTALL_PREFIX=/libxml2 -D CMAKE_PREFIX_PATH="/build/zlib" -D LIBXML2_WITH_ICONV=OFF -D LIBXML2_WITH_LZMA=OFF -D 
LIBXML2_WITH_PYTHON=OFF + + - name: Build + run: | + cd libxml2-v${{ env.LIBXML2_VERSION }} + cmake --build build --config Release + + - name: Install + run: | + cd libxml2-v${{ env.LIBXML2_VERSION }} + cmake --install build + + - name: Upload Source + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: libxml2-v${{ env.LIBXML2_VERSION }}-src + path: libxml2-v${{ env.LIBXML2_VERSION }}.tar.gz + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: libxml2-${{ env.LIBXML2_VERSION }}-win64 + path: /libxml2 diff --git a/.github/workflows/libxslt.yml b/.github/workflows/libxslt.yml new file mode 100644 index 0000000000000..446c73ebb2159 --- /dev/null +++ b/.github/workflows/libxslt.yml @@ -0,0 +1,76 @@ +name: Build libxslt + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 03 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + build-libxslt: + runs-on: windows-latest + needs: get-versions + env: + LIBXML2_VERSION: ${{needs.get-versions.outputs.LIBXML2_VERSION}} + LIBXSLT_VERSION: ${{needs.get-versions.outputs.LIBXSLT_VERSION}} + ZLIB_VERSION: ${{needs.get-versions.outputs.ZLIB_VERSION}} + + steps: + - uses: ilammy/msvc-dev-cmd@v1 + + - name: Download libxml2 + uses: dawidd6/action-download-artifact@v3 + with: + workflow: libxml2.yml + workflow_conclusion: success + name: libxml2-${{ env.LIBXML2_VERSION }}-win64 + path: /build/libxml2 + if_no_artifact_found: fail + + - name: Download zlib + uses: dawidd6/action-download-artifact@v3 + with: + workflow: zlib.yml + workflow_conclusion: success + name: zlib-${{ env.ZLIB_VERSION }}-win64 + path: /build/zlib + if_no_artifact_found: fail + + - name: Download + run: | + curl https://gitlab.gnome.org/GNOME/libxslt/-/archive/v${{ env.LIBXSLT_VERSION }}/libxslt-v${{ env.LIBXSLT_VERSION }}.tar.gz -o ./libxslt-v${{ env.LIBXSLT_VERSION }}.tar.gz + tar zxvf libxslt-v${{ 
env.LIBXSLT_VERSION }}.tar.gz + + - name: Configure + run: | + cd libxslt-v${{ env.LIBXSLT_VERSION }} + cmake -B build -D CMAKE_INSTALL_PREFIX=/libxslt -D CMAKE_PREFIX_PATH="/build/zlib;/build/libxml2" + + - name: Build + run: | + cd libxslt-v${{ env.LIBXSLT_VERSION }} + cmake --build build --config Release + + - name: Install + run: | + cd libxslt-v${{ env.LIBXSLT_VERSION }} + cmake --install build + + - name: Upload Source + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: libxslt-v${{ env.LIBXSLT_VERSION }}-src + path: libxslt-v${{ env.LIBXSLT_VERSION }}.tar.gz + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: libxslt-${{ env.LIBXSLT_VERSION }}-win64 + path: /libxslt diff --git a/.github/workflows/lz4.yml b/.github/workflows/lz4.yml new file mode 100644 index 0000000000000..cbaf9de6f08b1 --- /dev/null +++ b/.github/workflows/lz4.yml @@ -0,0 +1,77 @@ +name: Build lz4 + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 01 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + build-lz4: + runs-on: windows-latest + needs: get-versions + env: + LZ4_VERSION: ${{needs.get-versions.outputs.LZ4_VERSION}} + steps: + - uses: microsoft/setup-msbuild@v2 + + - name: Download + run: | + curl -L https://github.com/lz4/lz4/releases/download/v${{ env.LZ4_VERSION }}/lz4-${{ env.LZ4_VERSION }}.tar.gz -o ./lz4-${{ env.LZ4_VERSION }}.tar.gz + tar zxvf lz4-${{ env.LZ4_VERSION }}.tar.gz + + - name: Build + run: | + cd lz4-${{ env.LZ4_VERSION }} + msbuild build\VS2022\lz4.sln -target:liblz4-dll /property:Configuration=Release /property:Platform=x64 + + - name: Install + run: | + cd lz4-${{ env.LZ4_VERSION }} + + mkdir \lz4 + + mkdir \lz4\bin + copy build\VS2022\bin\x64_Release\liblz4.dll /lz4/bin/ + copy build\VS2022\bin\x64_Release\liblz4.pdb /lz4/bin/ + + mkdir \lz4\include + copy lib\*.h \lz4\include\ + + mkdir \lz4\lib + 
copy build\VS2022\bin\x64_Release\liblz4.lib \lz4\lib\ + + - name: Create pkgconfig File + run: | + mkdir \lz4\lib\pkgconfig + + >> \lz4\lib\pkgconfig\liblz4.pc echo prefix=${pcfiledir}/../.. + >> \lz4\lib\pkgconfig\liblz4.pc echo exec_prefix=${prefix} + >> \lz4\lib\pkgconfig\liblz4.pc echo libdir=${prefix}/lib + >> \lz4\lib\pkgconfig\liblz4.pc echo includedir=${prefix}/include + >> \lz4\lib\pkgconfig\liblz4.pc echo. + >> \lz4\lib\pkgconfig\liblz4.pc echo Name: liblz4 + >> \lz4\lib\pkgconfig\liblz4.pc echo Version: ${{ env.LZ4_VERSION }} + >> \lz4\lib\pkgconfig\liblz4.pc echo Description: lz4 compression library + >> \lz4\lib\pkgconfig\liblz4.pc echo Libs: -L${libdir} -llz4 + >> \lz4\lib\pkgconfig\liblz4.pc echo Cflags: -I${includedir} + shell: cmd + + - name: Upload Source + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: lz4-${{ env.LZ4_VERSION }}-src + path: lz4-${{ env.LZ4_VERSION }}.tar.gz + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: lz4-${{ env.LZ4_VERSION }}-win64 + path: /lz4 diff --git a/.github/workflows/manifest.yml b/.github/workflows/manifest.yml new file mode 100644 index 0000000000000..3294cab94bb9a --- /dev/null +++ b/.github/workflows/manifest.yml @@ -0,0 +1,102 @@ +name: Set package versions + +on: + workflow_call: + inputs: + config-path: + required: true + default: ./manifest.json + type: string + # Map the workflow outputs to job outputs + outputs: + OPENSSL_VERSION: + description: "openssl version" + value: ${{ jobs.set_versions.outputs.output1 }} + LIBICONV_VERSION: + description: "libiconv version" + value: ${{ jobs.set_versions.outputs.output2 }} + KRB5_VERSION: + description: "krb version" + value: ${{ jobs.set_versions.outputs.output3 }} + GETTEXT_VERSION: + description: "gettext version" + value: ${{ jobs.set_versions.outputs.output4 }} + ICU_VERSION: + description: "icu version" + value: ${{ jobs.set_versions.outputs.output5 }} + LIBXML2_VERSION: + 
description: "libxml2 version" + value: ${{ jobs.set_versions.outputs.output6 }} + LIBXSLT_VERSION: + description: "libxslt version" + value: ${{ jobs.set_versions.outputs.output7 }} + LZ4_VERSION: + description: "lz4 version" + value: ${{ jobs.set_versions.outputs.output8 }} + MESON_VERSION: + description: "meson version" + value: ${{ jobs.set_versions.outputs.output9 }} + NINJA_VERSION: + description: "ninja version" + value: ${{ jobs.set_versions.outputs.output10 }} + OSSP-UUID_VERSION: + description: "ossp_uuid version" + value: ${{ jobs.set_versions.outputs.output11 }} + PKGCONF_VERSION: + description: "pkgconf version" + value: ${{ jobs.set_versions.outputs.output12 }} + POSTGRESQL_VERSION: + description: "postgresql version" + value: ${{ jobs.set_versions.outputs.output13 }} + POSTGRESQL-DEV_VERSION: + description: "postgresql-dev version" + value: ${{ jobs.set_versions.outputs.output14 }} + ZLIB_VERSION: + description: "zlib version" + value: ${{ jobs.set_versions.outputs.output15 }} + ZSTD_VERSION: + description: "zstd version" + value: ${{ jobs.set_versions.outputs.output16 }} + WINFLEXBISON_VERSION: + description: "winflex bison version" + value: ${{ jobs.set_versions.outputs.output17 }} + DIFFUTILS_VERSION: + description: "diffutils version" + value: ${{ jobs.set_versions.outputs.output18 }} + +jobs: + set_versions: + name: Generate output + runs-on: windows-latest + # Map the job outputs to step outputs + outputs: + output1: ${{ steps.step1.outputs.OPENSSL_VERSION }} + output2: ${{ steps.step1.outputs.LIBICONV_VERSION }} + output3: ${{ steps.step1.outputs.KRB5_VERSION }} + output4: ${{ steps.step1.outputs.GETTEXT_VERSION }} + output5: ${{ steps.step1.outputs.ICU_VERSION }} + output6: ${{ steps.step1.outputs.LIBXML2_VERSION }} + output7: ${{ steps.step1.outputs.LIBXSLT_VERSION }} + output8: ${{ steps.step1.outputs.LZ4_VERSION }} + output9: ${{ steps.step1.outputs.MESON_VERSION }} + output10: ${{ steps.step1.outputs.NINJA_VERSION }} + output11: ${{ 
steps.step1.outputs.OSSP-UUID_VERSION }} + output12: ${{ steps.step1.outputs.PKGCONF_VERSION }} + output13: ${{ steps.step1.outputs.POSTGRESQL_VERSION }} + output14: ${{ steps.step1.outputs.POSTGRESQL-DEV_VERSION }} + output15: ${{ steps.step1.outputs.ZLIB_VERSION }} + output16: ${{ steps.step1.outputs.ZSTD_VERSION }} + output17: ${{ steps.step1.outputs.WINFLEXBISON_VERSION }} + output18: ${{ steps.step1.outputs.DIFFUTILS_VERSION }} + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set versions + shell: powershell + id: step1 + run: | + $json = (Get-Content ${{inputs.config-path}} -Raw) | ConvertFrom-Json + ($json.packages) | foreach-object { write "$($_.name.ToUpper())_VERSION=$($_.version)" >> $env:GITHUB_OUTPUT} + diff --git a/.github/workflows/meson.yml b/.github/workflows/meson.yml new file mode 100644 index 0000000000000..9b1bfccd0a128 --- /dev/null +++ b/.github/workflows/meson.yml @@ -0,0 +1,41 @@ +name: Package Meson + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 00 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + package-meson: + runs-on: windows-latest + needs: get-versions + env: + MESON_VERSION: ${{needs.get-versions.outputs.MESON_VERSION}} + steps: + - name: Download + run: | + curl.exe -L -o meson.tar.gz https://github.com/mesonbuild/meson/releases/download/${{ env.MESON_VERSION }}/meson-${{ env.MESON_VERSION }}.tar.gz + + mkdir -p /meson/mesonbuild + mkdir -p /meson/bin + + tar -C /meson/mesonbuild --strip-components=1 -xvzf meson.tar.gz + + echo "@python %~dp0/../mesonbuild/meson.py %*" | out-file -encoding ascii \meson\bin\meson.cmd; + \meson\bin\meson.cmd --help + + $ENV:PATH="/meson/bin;$ENV:PATH" + meson --version + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: meson-${{ env.MESON_VERSION }}-win64 + path: /meson diff --git a/.github/workflows/ninja.yml 
b/.github/workflows/ninja.yml new file mode 100644 index 0000000000000..a67e71f22e34b --- /dev/null +++ b/.github/workflows/ninja.yml @@ -0,0 +1,37 @@ +name: Package Ninja + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 00 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + package-ninja: + needs: get-versions + runs-on: windows-latest + env: + NINJA_VERSION: ${{needs.get-versions.outputs.NINJA_VERSION}} + steps: + - name: Download + run: | + curl -L -o ninja.zip https://github.com/ninja-build/ninja/releases/download/v${{ env.NINJA_VERSION }}/ninja-win.zip + mkdir /ninja/bin + + unzip ninja.zip + cp ninja.exe /ninja/bin + ls /ninja/bin + $ENV:PATH="/ninja/bin;$ENV:PATH" + ninja --version || true + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: ninja-${{ env.NINJA_VERSION }}-win64 + path: /ninja diff --git a/.github/workflows/openssl.yml b/.github/workflows/openssl.yml new file mode 100644 index 0000000000000..bb84871662bcc --- /dev/null +++ b/.github/workflows/openssl.yml @@ -0,0 +1,77 @@ +name: Build OpenSSL + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 01 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + build-openssl: + needs: get-versions + runs-on: windows-latest + env: + OPENSSL_VERSION: ${{needs.get-versions.outputs.OPENSSL_VERSION}} + steps: + - uses: ilammy/msvc-dev-cmd@v1 + + - name: Download + run: | + curl -L https://www.openssl.org/source/openssl-${{ env.OPENSSL_VERSION }}.tar.gz -o ./openssl-${{ env.OPENSSL_VERSION }}.tar.gz + tar zxvf openssl-${{ env.OPENSSL_VERSION }}.tar.gz + + - name: Configure + run: | + cd openssl-${{ env.OPENSSL_VERSION }} + perl Configure VC-WIN64A no-asm --prefix=\openssl no-ssl3 no-comp + + - name: Build + run: | + cd openssl-${{ env.OPENSSL_VERSION }} + nmake + + - name: Test + run: | + cd 
openssl-${{ env.OPENSSL_VERSION }} + nmake test + + - name: Install + run: | + mkdir \openssl + cd openssl-${{ env.OPENSSL_VERSION }} + nmake install + + - name: Create pkgconfig File + run: | + mkdir \openssl\lib\pkgconfig + + >> \openssl\lib\pkgconfig\openssl.pc echo prefix=${pcfiledir}/../.. + >> \openssl\lib\pkgconfig\openssl.pc echo exec_prefix=${prefix} + >> \openssl\lib\pkgconfig\openssl.pc echo libdir=${prefix}/lib + >> \openssl\lib\pkgconfig\openssl.pc echo includedir=${prefix}/include + >> \openssl\lib\pkgconfig\openssl.pc echo. + >> \openssl\lib\pkgconfig\openssl.pc echo Name: openssl + >> \openssl\lib\pkgconfig\openssl.pc echo Description: openssl encryption library + >> \openssl\lib\pkgconfig\openssl.pc echo Version: ${{ env.OPENSSL_VERSION }} + >> \openssl\lib\pkgconfig\openssl.pc echo Libs: -L${libdir} -lcrypto -lssl + >> \openssl\lib\pkgconfig\openssl.pc echo Cflags: -I${includedir} + shell: cmd + + - name: Upload Source + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: openssl-${{ env.OPENSSL_VERSION }}-src + path: openssl-${{ env.OPENSSL_VERSION }}.tar.gz + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: openssl-${{ env.OPENSSL_VERSION }}-win64 + path: /openssl diff --git a/.github/workflows/ossp-uuid.yml b/.github/workflows/ossp-uuid.yml new file mode 100644 index 0000000000000..6a2cc00061349 --- /dev/null +++ b/.github/workflows/ossp-uuid.yml @@ -0,0 +1,95 @@ +name: Build ossp-uuid + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 01 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + build-ossp-uuid: + needs: get-versions + runs-on: windows-latest + defaults: + run: + shell: msys2 {0} + env: + OSSP-UUID_VERSION: ${{needs.get-versions.outputs.OSSP-UUID_VERSION}} + + steps: + - uses: msys2/setup-msys2@v2 + with: + msystem: UCRT64 + update: true + install: mingw-w64-ucrt-x86_64-gcc 
automake autoconf pkg-config make zip patch tar + + - name: Download + # We get this from the Debian repo, as OSSP is basically unavailable these days + run: | + curl https://salsa.debian.org/debian/ossp-uuid/-/archive/upstream/${{ env.OSSP-UUID_VERSION }}/ossp-uuid-upstream-${{ env.OSSP-UUID_VERSION }}.tar.gz -o ./ossp-uuid-upstream-${{ env.OSSP-UUID_VERSION }}.tar.gz + tar zxvf ossp-uuid-upstream-${{ env.OSSP-UUID_VERSION }}.tar.gz + + - name: Configure + run: | + cd ossp-uuid-upstream-${{ env.OSSP-UUID_VERSION }} + ./configure --prefix=$(pwd)/winpgbuild + shell: bash + + - name: Build + run: | + cd ossp-uuid-upstream-${{ env.OSSP-UUID_VERSION }} + make all + shell: bash + + - name: Install + # Manual installation, as .exe extensions aren't handled properly + run: | + cd ossp-uuid-upstream-${{ env.OSSP-UUID_VERSION }} + mkdir winpgbuild + mkdir winpgbuild/bin + mkdir winpgbuild/include + mkdir winpgbuild/lib + + cp .libs/uuid.exe winpgbuild/bin + cp uuid.h winpgbuild/include + cp .libs/libuuid.a winpgbuild/lib + cp .libs/libuuid.a winpgbuild/lib/uuid.lib + cp .libs/libuuid.la winpgbuild/lib + cp .libs/libuuid.lai winpgbuild/lib + + - name: Create pkgconfig File + run: | + cd ossp-uuid-upstream-${{ env.OSSP-UUID_VERSION }} + mkdir winpgbuild/lib/pkgconfig + + cat << EOF > winpgbuild/lib/pkgconfig/ossp-uuid.pc + prefix=\${pcfiledir}/../.. 
+ exec_prefix=\${prefix} + libdir=\${prefix}/lib + includedir=\${prefix}/include + + Name: ossp-uuid + Description: ossp uuid library + Version: ${{ env.OSSP-UUID_VERSION }} + Libs: -L\${libdir} -luuid + Cflags: -I\${includedir} + EOF + + - name: Upload Source + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: ossp-uuid-upstream-${{ env.OSSP-UUID_VERSION }}-src + path: ossp-uuid-upstream-${{ env.OSSP-UUID_VERSION }}.tar.gz + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: ossp-uuid-${{ env.OSSP-UUID_VERSION }}-win64 + path: ossp-uuid-upstream-${{ env.OSSP-UUID_VERSION }}/winpgbuild diff --git a/.github/workflows/pkgconf.yml b/.github/workflows/pkgconf.yml new file mode 100644 index 0000000000000..ce9da86e338cd --- /dev/null +++ b/.github/workflows/pkgconf.yml @@ -0,0 +1,88 @@ +name: Package pkgconf + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 00 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + # There are no obviously trustworthy places (it's typically old sourceforge + # pages) to download pkgconf from. Doing the choco install every time is too + # slow (and uses such an old sourceforge page). So it seems easiest to just + # build it here. 
+ package-pkgconf: + runs-on: windows-latest + needs: get-versions + env: + MESON_VERSION: ${{needs.get-versions.outputs.MESON_VERSION}} + NINJA_VERSION: ${{needs.get-versions.outputs.NINJA_VERSION}} + PKGCONF_VERSION: ${{needs.get-versions.outputs.PKGCONF_VERSION}} + + steps: + - uses: ilammy/msvc-dev-cmd@v1 + + - name: Download meson + uses: dawidd6/action-download-artifact@v3 + with: + workflow: meson.yml + workflow_conclusion: success + name: meson-${{ env.MESON_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + - name: Download ninja + uses: dawidd6/action-download-artifact@v3 + with: + workflow: ninja.yml + workflow_conclusion: success + name: ninja-${{ env.NINJA_VERSION }}-win64 + path: /builddeps + if_no_artifact_found: fail + + - name: Add build deps to path + run: | + echo "/builddeps/bin" >> $ENV:GITHUB_PATH + + - name: Download + run: | + curl.exe -L https://github.com/pkgconf/pkgconf/archive/refs/tags/pkgconf-${{ env.PKGCONF_VERSION }}.tar.gz -o pkgconf-${{ env.PKGCONF_VERSION }}.tar.gz + tar zxvf ./pkgconf-${{ env.PKGCONF_VERSION }}.tar.gz + mv pkgconf-pkgconf-${{ env.PKGCONF_VERSION }} pkgconf + shell: bash + + - name: Configure + run: | + cd pkgconf + meson setup build -Dbuildtype=release --default-library static --prefix \pkgconf + + - name: Build + run: | + cd pkgconf + ninja -C build + + - name: Install + run: | + cd pkgconf + ninja -C build install + # that way we don't need to tell meson about the non-standard name + cp \pkgconf\bin\pkgconf.exe \pkgconf\bin\pkg-config.exe + + - name: Upload Source + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: pkgconf-${{ env.PKGCONF_VERSION }}-src + path: pkgconf-${{ env.PKGCONF_VERSION }}.tar.gz + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: pkgconf-${{ env.PKGCONF_VERSION }}-win64 + path: /pkgconf diff --git a/.github/workflows/postgresql-dev.yml b/.github/workflows/postgresql-dev.yml new file mode 
100644 index 0000000000000..c2f5d48ec85f5 --- /dev/null +++ b/.github/workflows/postgresql-dev.yml @@ -0,0 +1,169 @@ +name: Build PostgreSQL (Dev) + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 04 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + build-postgresql-dev: + needs: get-versions + strategy: + fail-fast: false + env: + POSTGRESQL-DEV_VERSION: ${{needs.get-versions.outputs.POSTGRESQL-DEV_VERSION}} + + runs-on: windows-latest + steps: + - uses: ilammy/msvc-dev-cmd@v1 + + - name: Download dependencies + uses: dawidd6/action-download-artifact@v7 + with: + workflow: bundle-deps.yml + workflow_conclusion: success + name: all-deps-win64 + path: /builddeps + if_no_artifact_found: fail + + # Copy libraries required at runtime to installation directory. + # + # Do so before configuring / building postgres, otherwise we need to + # duplicate knowledge about aberrant paths like "bin64". + - name: Install Dependencies + run: | + mkdir \postgresql + mkdir \postgresql\bin + + copy \builddeps\bin64\icuuc*.dll \postgresql\bin\ + copy \builddeps\bin64\icudt*.dll \postgresql\bin\ + copy \builddeps\bin64\icuin*.dll \postgresql\bin\ + copy \builddeps\bin\libiconv-2.dll \postgresql\bin\ + copy \builddeps\bin\libintl-8.dll \postgresql\bin\ + copy \builddeps\bin\libxml2.dll \postgresql\bin\ + copy \builddeps\bin\libxslt.dll \postgresql\bin\ + copy \builddeps\bin\libssl-*-x64.dll \postgresql\bin\ + copy \builddeps\bin\libcrypto-*-x64.dll \postgresql\bin\ + copy \builddeps\bin\liblz4.dll \postgresql\bin\ + copy \builddeps\bin\libzstd.dll \postgresql\bin\ + copy \builddeps\bin\zlib1.dll \postgresql\bin\ + + - name: Add build deps to path + run: | + # so binaries and libraries can be found/run + echo "/builddeps/bin" >> $ENV:GITHUB_PATH + echo "/builddeps" >> $ENV:GITHUB_PATH + echo "/postgresql/bin" >> $ENV:GITHUB_PATH + + # This is run as a privileged user. 
For some reason windows ends up + # creating the directories owned by "Administrator", which causes + # problems because when postgres drops privileges, it doesn't have + # sufficient rights to access them anymore! + # + # I have pulled most of my hair out over the last hours. + # + # See also https://www.postgresql.org/message-id/20240707064046.blgjxoqiywunbebl%40awork3.anarazel.de + - name: Work around privilege issue + run: | + icacls.exe . /inheritance:e /grant 'runneradmin:(OI)(CI)F' + + - name: Checkout + uses: actions/checkout@v4 + with: + repository: postgres/postgres + path: postgresql-${{ env.POSTGRESQL-DEV_VERSION }} + ref: ${{ env.POSTGRESQL-DEV_VERSION }} + + - name: Source archive + run: | + cd postgresql-${{ env.POSTGRESQL-DEV_VERSION }} + git archive --format=tar.gz -o ../postgresql-dev-${{ env.POSTGRESQL-DEV_VERSION }}.tar.gz --prefix=postgresql-dev-${{ env.POSTGRESQL-DEV_VERSION }}/ ${{ env.POSTGRESQL-DEV_VERSION }} + + - name: Configure + run: | + cd postgresql-${{ env.POSTGRESQL-DEV_VERSION }} + + # don't use \path style paths for library search, link.exe ends up + # interpreting paths like that as flags! 
+ $deps = resolve-path /builddeps + + # can't enable some extra tests + # - libpq_encryption -> fails for unknown reasons + # - kerberos -> test not yet supported on windows + # - load_balance -> would need to set up hostnames + meson setup ` + --prefix=\postgresql ` + "--cmake-prefix-path=${deps}" ` + "--pkg-config-path=${deps}\lib\pkgconfig" ` + "-Dextra_include_dirs=${deps}\include" ` + "-Dextra_lib_dirs=${deps}\lib,${deps}\lib\amd64" ` + "-DPG_TEST_EXTRA=ldap ssl" ` + -Duuid=ossp ` + -Db_pch=true ` + -Dgssapi=disabled ` + -Dbuildtype=debugoptimized ` + build + + - name: Build + run: | + cd postgresql-${{ env.POSTGRESQL-DEV_VERSION }}\build + ninja -j 1 + + - name: Test + run: | + cd postgresql-${{ env.POSTGRESQL-DEV_VERSION }}\build + + # use unix socket to prevent port conflicts + $env:PG_TEST_USE_UNIX_SOCKETS = 1; + # otherwise pg_regress insists on creating the directory and does it + # in a non-existing place, this needs to be fixed :( + mkdir d:/sockets + $env:PG_REGRESS_SOCK_DIR = "d:/sockets/" + + meson test --timeout-multiplier 2 + + - name: Upload Test Results + if: ${{ !success() }} + uses: actions/upload-artifact@v4 + with: + name: test_logs + path: | + postgresql-${{ env.POSTGRESQL-DEV_VERSION }}\build\testrun\**\*.log + postgresql-${{ env.POSTGRESQL-DEV_VERSION }}\build\testrun\**\regress_log_* + postgresql-${{ env.POSTGRESQL-DEV_VERSION }}\build\testrun\**\*.diffs + postgresql-${{ env.POSTGRESQL-DEV_VERSION }}\build\testrun\**\*.out + postgresql-${{ env.POSTGRESQL-DEV_VERSION }}\build\meson-logs\testlog.txt + + - name: Install + run: | + cd postgresql-${{ env.POSTGRESQL-DEV_VERSION }}\build + + meson install --quiet + + - name: Upload Source + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: postgresql-dev-${{ env.POSTGRESQL-DEV_VERSION }}-src + path: postgresql-dev-${{ env.POSTGRESQL-DEV_VERSION }}.tar.gz + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: 
postgresql-dev-${{ env.POSTGRESQL-DEV_VERSION }}-win64 + path: /postgresql + + - name: Upload Meson Log + if: always() + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: meson_log + path: postgresql-${{ env.POSTGRESQL-DEV_VERSION }}\build\meson-logs\meson-log.txt diff --git a/.github/workflows/postgresql.yml b/.github/workflows/postgresql.yml new file mode 100644 index 0000000000000..d9b11d5a2037c --- /dev/null +++ b/.github/workflows/postgresql.yml @@ -0,0 +1,243 @@ +name: Build PostgreSQL + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 04 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + build-postgresql: + needs: get-versions + + strategy: + fail-fast: false + matrix: + version: ${{ fromJSON(needs.get-versions.outputs.POSTGRESQL_VERSION) }} + + runs-on: windows-latest + + # -m enables parallelism + # verbosity:minimal + Summary reduce verbosity, while keeping a summary of + # errors/warnings + # ForceNoAlign prevents msbuild from introducing line-breaks for long lines + # disable file tracker, we're never going to rebuild, and it slows down the + # build + env: + MSBFLAGS: -m -verbosity:minimal "-consoleLoggerParameters:Summary;ForceNoAlign" /p:TrackFileAccess=false -nologo + POSTGRESQL_VERSIONS: ${{needs.get-versions.outputs.POSTGRESQL_VERSION}} + steps: + - uses: ilammy/msvc-dev-cmd@v1 + + - name: Download dependencies + uses: dawidd6/action-download-artifact@v7 + with: + workflow: bundle-deps.yml + workflow_conclusion: success + name: all-deps-win64 + path: /builddeps + if_no_artifact_found: fail + + # Copy libraries required at runtime to installation directory. + # + # Do so before configuring / building postgres, otherwise we need to + # duplicate knowledge about aberrant paths like "bin64". 
+ - name: Install Dependencies + run: | + mkdir \postgresql + mkdir \postgresql\bin + + copy \builddeps\bin64\icuuc*.dll \postgresql\bin\ + copy \builddeps\bin64\icudt*.dll \postgresql\bin\ + copy \builddeps\bin64\icuin*.dll \postgresql\bin\ + copy \builddeps\bin\libiconv-2.dll \postgresql\bin\ + copy \builddeps\bin\libintl-8.dll \postgresql\bin\ + copy \builddeps\bin\libxml2.dll \postgresql\bin\ + copy \builddeps\bin\libxslt.dll \postgresql\bin\ + copy \builddeps\bin\libssl-*-x64.dll \postgresql\bin\ + copy \builddeps\bin\libcrypto-*-x64.dll \postgresql\bin\ + copy \builddeps\bin\liblz4.dll \postgresql\bin\ + copy \builddeps\bin\libzstd.dll \postgresql\bin\ + copy \builddeps\bin\zlib1.dll \postgresql\bin\ + + - name: Add build deps to path + run: | + # so binaries and libraries can be found/run + echo "/builddeps/bin" >> $ENV:GITHUB_PATH + echo "/builddeps" >> $ENV:GITHUB_PATH + echo "/postgresql/bin" >> $ENV:GITHUB_PATH + + # This is run as a privileged user. For some reason windows ends up + # creating the directories owned by "Administrator", which causes + # problems because when postgres drops privileges, it doesn't have + # sufficient rights to access them anymore! + # + # I have pulled most of my hair out over the last hours. + # + # See also https://www.postgresql.org/message-id/20240707064046.blgjxoqiywunbebl%40awork3.anarazel.de + - name: Work around privilege issue + run: | + icacls.exe . /inheritance:e /grant 'runneradmin:(OI)(CI)F' + + - name: Download + run: | + curl https://ftp.postgresql.org/pub/source/v${{ matrix.version }}/postgresql-${{ matrix.version }}.tar.gz -o ./postgresql-${{ matrix.version }}.tar.gz + tar zxvf postgresql-${{ matrix.version }}.tar.gz + + - name: Configure (msvc) + run: | + cd postgresql-${{ matrix.version }}\src\tools\msvc + + >> config.pl echo # Configuration arguments for vcbuild. + >> config.pl echo use strict; + >> config.pl echo use warnings; + >> config.pl echo. 
+ >> config.pl echo our $config = { + >> config.pl echo asserts =^> 0, # --enable-cassert + >> config.pl echo ldap =^> 1, # --with-ldap + >> config.pl echo extraver =^> undef, # --with-extra-version=^ + >> config.pl echo gss =^> undef, # --with-gssapi=^ + >> config.pl echo icu =^> '\builddeps', # --with-icu=^ + >> config.pl echo lz4 =^> '\builddeps', # --with-lz4=^ + >> config.pl echo zstd =^> '\builddeps', # --with-zstd=^ + >> config.pl echo nls =^> '\builddeps', # --enable-nls=^ + >> config.pl echo tap_tests =^> 1, # --enable-tap-tests + >> config.pl echo tcl =^> undef, # --with-tcl=^ + >> config.pl echo perl =^> undef, # --with-perl + >> config.pl echo python =^> undef, # --with-python=^ + >> config.pl echo openssl =^> '\builddeps', # --with-openssl=^ + >> config.pl echo uuid =^> '\builddeps', # --with-ossp-uuid + >> config.pl echo xml =^> '\builddeps', # --with-libxml=^ + >> config.pl echo xslt =^> '\builddeps', # --with-libxslt=^ + >> config.pl echo iconv =^> '\builddeps', # (not in configure, path to iconv) + >> config.pl echo zlib =^> '\builddeps' # --with-zlib=^ + >> config.pl echo }; + >> config.pl echo. + >> config.pl echo 1; + shell: cmd + if: ${{ fromJson(matrix.version) < 17.0 }} + + - name: Configure (meson) + run: | + cd postgresql-${{ matrix.version }} + + # don't use \path style paths for library search, link.exe ends up + # interpreting paths like that as flags! 
+ $deps = resolve-path /builddeps + + # can't enable some extra tests + # - libpq_encryption -> fails for unknown reasons + # - kerberos -> test not yet supported on windows + # - load_balance -> would need to set up hostnames + meson setup ` + --prefix=\postgresql ` + "--cmake-prefix-path=${deps}" ` + "--pkg-config-path=${deps}\lib\pkgconfig" ` + "-Dextra_include_dirs=${deps}\include" ` + "-Dextra_lib_dirs=${deps}\lib,${deps}\lib\amd64" ` + "-DPG_TEST_EXTRA=ldap ssl" ` + -Duuid=ossp ` + -Db_pch=true ` + -Dgssapi=disabled ` + -Dbuildtype=debugoptimized ` + build + if: ${{ fromJson(matrix.version) >= 17.0 }} + + - name: Build (msvc) + run: | + cd postgresql-${{ matrix.version }}\src\tools\msvc + build + shell: cmd + if: ${{ fromJson(matrix.version) < 17.0 }} + + - name: Build (meson) + run: | + cd postgresql-${{ matrix.version }}\build + ninja -j 1 + if: ${{ fromJson(matrix.version) >= 17.0 }} + + - name: Test (msvc) + run: | + cd postgresql-${{ matrix.version }}\src\tools\msvc + vcregress check + shell: cmd + if: ${{ fromJson(matrix.version) < 17.0 }} + + - name: Test (meson) + run: | + cd postgresql-${{ matrix.version }}\build + + # use unix socket to prevent port conflicts + $env:PG_TEST_USE_UNIX_SOCKETS = 1; + # otherwise pg_regress insists on creating the directory and does it + # in a non-existing place, this needs to be fixed :( + mkdir d:/sockets + $env:PG_REGRESS_SOCK_DIR = "d:/sockets/" + + meson test + if: ${{ fromJson(matrix.version) >= 17.0 }} + + - name: Upload Test Results + uses: actions/upload-artifact@v4 + with: + if-no-files-found: ignore + name: postgresql-${{ matrix.version }}-test-logs + path: | + postgresql-${{ matrix.version }}\build\testrun\**\*.log + postgresql-${{ matrix.version }}\build\testrun\**\regress_log_* + postgresql-${{ matrix.version }}\build\testrun\**\*.diffs + postgresql-${{ matrix.version }}\build\testrun\**\*.out + postgresql-${{ matrix.version }}\build\meson-logs\testlog.txt + if: always() + + - name: Install (msvc) + run: 
| + cd postgresql-${{ matrix.version }}\src\tools\msvc + + perl install.pl \postgresql + shell: cmd + if: ${{ fromJson(matrix.version) < 17.0 }} + + - name: Install (meson) + run: | + cd postgresql-${{ matrix.version }}\build + + meson install --quiet + if: ${{ fromJson(matrix.version) >= 17.0 }} + + - name: Upload Source + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: postgresql-${{ matrix.version }}-src + path: postgresql-${{ matrix.version }}.tar.gz + + - name: Upload Config + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: postgresql-${{ matrix.version }}-config + path: | + postgresql-${{ matrix.version }}/src/tools/msvc/config.pl + postgresql-${{ matrix.version }}/src/tools/msvc/buildenv.pl + if: ${{ fromJson(matrix.version) < 17.0 }} + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: postgresql-${{ matrix.version }}-win64 + path: /postgresql + + - name: Upload Meson Log + uses: actions/upload-artifact@v4 + with: + if-no-files-found: ignore + name: postgresql-${{ matrix.version }}-meson-log + path: postgresql-${{ matrix.version }}\build\meson-logs\meson-log.txt + if: always() diff --git a/.github/workflows/winflexbison.yml b/.github/workflows/winflexbison.yml new file mode 100644 index 0000000000000..451e0c0e29550 --- /dev/null +++ b/.github/workflows/winflexbison.yml @@ -0,0 +1,40 @@ +name: Package winflexbison + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 00 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + package-winflexbison: + runs-on: windows-latest + needs: get-versions + env: + WINFLEXBISON_VERSION: ${{needs.get-versions.outputs.WINFLEXBISON_VERSION}} + + steps: + - name: Download + run: | + curl -L -o winflexbison.zip "https://github.com/lexxmark/winflexbison/releases/download/v${{ env.WINFLEXBISON_VERSION }}/win_flex_bison-${{ env.WINFLEXBISON_VERSION 
}}.zip" + mkdir winflexbison + cd winflexbison + unzip ../winflexbison.zip + + # while the meson build knows about win_{bison,flex}, src/tools/msvc doesn't + - name: Rename Binaries + run: | + mv winflexbison/win_bison.exe winflexbison/bison.exe + mv winflexbison/win_flex.exe winflexbison/flex.exe + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: winflexbison-${{ env.WINFLEXBISON_VERSION }}-win64 + path: winflexbison diff --git a/.github/workflows/zlib.yml b/.github/workflows/zlib.yml new file mode 100644 index 0000000000000..d5250c0d5748f --- /dev/null +++ b/.github/workflows/zlib.yml @@ -0,0 +1,82 @@ +name: Build zlib + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 01 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + build-zlib: + runs-on: windows-latest + needs: get-versions + env: + ZLIB_VERSION: ${{needs.get-versions.outputs.ZLIB_VERSION}} + steps: + - uses: ilammy/msvc-dev-cmd@v1 + + - name: Download + run: | + curl https://zlib.net/zlib-${{ env.ZLIB_VERSION }}.tar.gz -o ./zlib-${{ env.ZLIB_VERSION }}.tar.gz + tar zxvf zlib-${{ env.ZLIB_VERSION }}.tar.gz + + - name: Build + run: | + cd zlib-${{ env.ZLIB_VERSION }} + nmake -f win32/Makefile.msc + + - name: Install + run: | + cd zlib-${{ env.ZLIB_VERSION }} + + mkdir \zlib + + mkdir \zlib\bin + copy zlib1.dll \zlib\bin\ + copy zlib1.pdb \zlib\bin\ + + mkdir \zlib\include + copy zlib.h \zlib\include\ + copy zconf.h \zlib\include\ + + mkdir \zlib\lib + copy zlib.lib \zlib\lib\ + copy zlib.pdb \zlib\lib\ + copy zdll.lib \zlib\lib\ + copy zdll.exp \zlib\lib\ + + - name: Create pkgconfig File + run: | + mkdir \zlib\lib\pkgconfig + + >> \zlib\lib\pkgconfig\zlib.pc echo prefix=${pcfiledir}/../.. 
+ >> \zlib\lib\pkgconfig\zlib.pc echo exec_prefix=${prefix} + >> \zlib\lib\pkgconfig\zlib.pc echo libdir=${prefix}/lib + >> \zlib\lib\pkgconfig\zlib.pc echo includedir=${prefix}/include + >> \zlib\lib\pkgconfig\zlib.pc echo. + >> \zlib\lib\pkgconfig\zlib.pc echo Name: zlib + >> \zlib\lib\pkgconfig\zlib.pc echo Description: zlib compression library + >> \zlib\lib\pkgconfig\zlib.pc echo Version: ${{ env.ZLIB_VERSION }} + >> \zlib\lib\pkgconfig\zlib.pc echo Requires: + >> \zlib\lib\pkgconfig\zlib.pc echo Libs: -L${libdir} -lzdll + >> \zlib\lib\pkgconfig\zlib.pc echo Cflags: -I${includedir} + shell: cmd + + - name: Upload Source + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: zlib-${{ env.ZLIB_VERSION }}-src + path: zlib-${{ env.ZLIB_VERSION }}.tar.gz + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: zlib-${{ env.ZLIB_VERSION }}-win64 + path: /zlib diff --git a/.github/workflows/zstd.yml b/.github/workflows/zstd.yml new file mode 100644 index 0000000000000..375cadf34104a --- /dev/null +++ b/.github/workflows/zstd.yml @@ -0,0 +1,85 @@ +name: Build zstd + +on: + workflow_dispatch: + workflow_call: + schedule: + - cron: "00 01 * * *" + +jobs: + get-versions: + uses: ./.github/workflows/manifest.yml + with: + config-path: manifest.json + + build-zstd: + runs-on: windows-latest + needs: get-versions + env: + ZSTD_VERSION: ${{needs.get-versions.outputs.ZSTD_VERSION}} + steps: + - uses: microsoft/setup-msbuild@v2 + - uses: ilammy/msvc-dev-cmd@v1 + + - name: Download + run: | + curl -L https://github.com/facebook/zstd/releases/download/v${{ env.ZSTD_VERSION }}/zstd-${{ env.ZSTD_VERSION }}.tar.gz -o ./zstd-${{ env.ZSTD_VERSION }}.tar.gz + tar zxvf zstd-${{ env.ZSTD_VERSION }}.tar.gz + + - name: Configure + run: | + cd zstd-${{ env.ZSTD_VERSION }} + devenv.com /upgrade build\VS2010\zstd.sln + + - name: Build + run: | + cd zstd-${{ env.ZSTD_VERSION }} + msbuild build\VS2010\zstd.sln 
-target:libzstd-dll /property:Configuration=Release /property:Platform=x64 + + - name: Install + run: | + cd zstd-${{ env.ZSTD_VERSION }} + + mkdir \zstd + + mkdir \zstd\bin + copy build\VS2010\bin\x64_Release\libzstd.dll \zstd\bin\ + copy build\VS2010\bin\x64_Release\libzstd.pdb \zstd\bin\ + + mkdir \zstd\include + copy lib\*.h \zstd\include\ + + mkdir \zstd\lib + copy build\VS2010\bin\x64_Release\libzstd.lib \zstd\lib\ + + - name: Create pkgconfig File + run: | + mkdir \zstd\lib\pkgconfig + + >> \zstd\lib\pkgconfig\libzstd.pc echo prefix=${pcfiledir}/../.. + >> \zstd\lib\pkgconfig\libzstd.pc echo exec_prefix=${prefix} + >> \zstd\lib\pkgconfig\libzstd.pc echo libdir=${prefix}/lib + >> \zstd\lib\pkgconfig\libzstd.pc echo includedir=${prefix}/include + >> \zstd\lib\pkgconfig\libzstd.pc echo. + >> \zstd\lib\pkgconfig\libzstd.pc echo Name: zstd + >> \zstd\lib\pkgconfig\libzstd.pc echo Description: fast lossless compression algorithm library + >> \zstd\lib\pkgconfig\libzstd.pc echo URL: https://facebook.github.io/zstd/ + >> \zstd\lib\pkgconfig\libzstd.pc echo Version: ${{ env.ZSTD_VERSION }} + >> \zstd\lib\pkgconfig\libzstd.pc echo Libs: -L${libdir} -lzstd + >> \zstd\lib\pkgconfig\libzstd.pc echo Libs.private: + >> \zstd\lib\pkgconfig\libzstd.pc echo Cflags: -I${includedir} + shell: cmd + + - name: Upload Source + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: zstd-${{ env.ZSTD_VERSION }}-src + path: zstd-${{ env.ZSTD_VERSION }}.tar.gz + + - name: Upload Binaries + uses: actions/upload-artifact@v4 + with: + if-no-files-found: error + name: zstd-${{ env.ZSTD_VERSION }}-win64 + path: /zstd diff --git a/manifest.json b/manifest.json new file mode 100644 index 0000000000000..e2a9e9f7fdb8e --- /dev/null +++ b/manifest.json @@ -0,0 +1,112 @@ +{ + "packages": [ + { + "name": "gettext", + "version": "0.25", + "source": "https://www.gnu.org/software/gettext/", + "licence": "https://www.gnu.org/software/gettext/manual/html_node/GNU-GPL.html" + }, + { 
+ "name": "icu", + "version": "77.1", + "source": "https://icu.unicode.org/home", + "licence": "https://github.com/unicode-org/icu/blob/main/LICENSE" + }, + { + "name": "libiconv", + "version": "1.18", + "source": "https://www.gnu.org/software/libiconv/", + "licence": "https://www.gnu.org/licenses/lgpl-2.1.html" + }, + { + "name": "krb5", + "version": "1.21.3", + "source": "https://web.mit.edu/kerberos/", + "licence": "https://web.mit.edu/kerberos/krb5-1.21/doc/mitK5license.html" + }, + { + "name": "libxml2", + "version": "2.13.8", + "source": "https://gitlab.gnome.org/GNOME/libxml2/", + "licence": "https://gitlab.gnome.org/GNOME/libxml2/-/blob/master/Copyright?ref_type=heads" + }, + { + "name": "libxslt", + "version": "1.1.43", + "source": "https://gitlab.gnome.org/GNOME/libxslt/", + "licence": "https://gitlab.gnome.org/GNOME/libxslt/-/blob/master/Copyright?ref_type=heads" + }, + { + "name": "lz4", + "version": "1.10.0", + "source": "https://lz4.org/", + "licence": "https://github.com/lz4/lz4/blob/dev/LICENSE" + }, + { + "name": "openssl", + "version": "3.0.17", + "source": "https://openssl.org/", + "licence": "https://www.openssl.org/source/apache-license-2.0.txt" + }, + { + "name": "ossp-uuid", + "version": "1.6.2", + "source": "https://salsa.debian.org/debian/ossp-uuid/", + "licence": "https://salsa.debian.org/debian/ossp-uuid/-/blob/master/README?ref_type=heads" + }, + { + "name": "zlib", + "version": "1.3.1", + "source": "https://zlib.net/", + "licence": "https://zlib.net/zlib_license.html" + }, + { + "name": "zstd", + "version": "1.5.7", + "source": "https://facebook.github.io/zstd/", + "licence": "https://github.com/facebook/zstd/blob/dev/LICENSE" + }, + { + "name": "diffutils", + "version": "2.8.7-1", + "source": "https://gnuwin32.sourceforge.net/packages/diffutils.htm", + "licence": "https://git.savannah.gnu.org/cgit/diffutils.git/tree/COPYING" + }, + { + "name": "meson", + "version": "1.8.2", + "source": "https://mesonbuild.com", + "licence": 
"https://github.com/mesonbuild/meson/blob/master/COPYING" + }, + { + "name": "ninja", + "version": "1.13.0", + "source": "https://ninja-build.org", + "licence": "https://github.com/ninja-build/ninja/blob/master/COPYING" + }, + { + "name": "pkgconf", + "version": "2.5.1", + "source": "http://pkgconf.org", + "licence": "https://github.com/pkgconf/pkgconf/blob/master/COPYING" + }, + { + "name": "winflexbison", + "version": "2.5.25", + "source": "https://github.com/lexxmark/winflexbison", + "licence": "https://github.com/lexxmark/winflexbison/blob/master/flex/src/COPYING" + }, + { + "name": "postgresql-dev", + "version": "master", + "source": "https://github.com/postgres/postgres", + "licence": "https://www.postgresql.org/about/licence/" + }, + { + "name": "postgresql", + "version": "['17.5', '16.9', '15.13', '14.18', '13.21']", + "source": "https://ftp.postgresql.org/pub/source/", + "licence": "https://www.postgresql.org/about/licence/" + } + ] + } From 608a3a356f98cbf19a1b89c7eb079acfc7b14379 Mon Sep 17 00:00:00 2001 From: Greg Burd Date: Mon, 4 Aug 2025 08:07:04 -0400 Subject: [PATCH 3/7] dev --- .idea/codeStyles/Project.xml | 106 ++++ .idea/codeStyles/codeStyleConfig.xml | 5 + .idea/editor.xml | 856 ++++++++++----------------- 3 files changed, 421 insertions(+), 546 deletions(-) create mode 100644 .idea/codeStyles/Project.xml create mode 100644 .idea/codeStyles/codeStyleConfig.xml diff --git a/.idea/codeStyles/Project.xml b/.idea/codeStyles/Project.xml new file mode 100644 index 0000000000000..531626720474b --- /dev/null +++ b/.idea/codeStyles/Project.xml @@ -0,0 +1,106 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/codeStyles/codeStyleConfig.xml b/.idea/codeStyles/codeStyleConfig.xml new file mode 100644 index 0000000000000..79ee123c2b23e --- /dev/null +++ b/.idea/codeStyles/codeStyleConfig.xml @@ -0,0 +1,5 @@ + + + + \ No newline at end of file diff --git a/.idea/editor.xml b/.idea/editor.xml index 1f0ef49b4faf4..5351bb28cc75a 100644 --- 
a/.idea/editor.xml +++ b/.idea/editor.xml @@ -1,580 +1,344 @@ - \ No newline at end of file From 6dc76286fce2df699331b0e13d9457d73512710d Mon Sep 17 00:00:00 2001 From: Greg Burd Date: Tue, 12 Aug 2025 06:57:12 -0400 Subject: [PATCH 4/7] Use consistent naming of the clock-sweep algorithm. Minor edits to comments only. --- src/backend/storage/buffer/README | 4 ++-- src/backend/storage/buffer/bufmgr.c | 8 ++++---- src/backend/storage/buffer/freelist.c | 10 +++++----- src/backend/storage/buffer/localbuf.c | 2 +- src/include/storage/buf_internals.h | 4 ++-- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/backend/storage/buffer/README b/src/backend/storage/buffer/README index a182fcd660ccb..4b13da5d7add8 100644 --- a/src/backend/storage/buffer/README +++ b/src/backend/storage/buffer/README @@ -211,9 +211,9 @@ Buffer Ring Replacement Strategy When running a query that needs to access a large number of pages just once, such as VACUUM or a large sequential scan, a different strategy is used. A page that has been touched only by such a scan is unlikely to be needed -again soon, so instead of running the normal clock sweep algorithm and +again soon, so instead of running the normal clock-sweep algorithm and blowing out the entire buffer cache, a small ring of buffers is allocated -using the normal clock sweep algorithm and those buffers are reused for the +using the normal clock-sweep algorithm and those buffers are reused for the whole scan. This also implies that much of the write traffic caused by such a statement will be done by the backend itself and not pushed off onto other processes. diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index fd7e21d96d31f..396b053b3faf3 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -3608,7 +3608,7 @@ BufferSync(int flags) * This is called periodically by the background writer process. 
* * Returns true if it's appropriate for the bgwriter process to go into - * low-power hibernation mode. (This happens if the strategy clock sweep + * low-power hibernation mode. (This happens if the strategy clock-sweep * has been "lapped" and no buffer allocations have occurred recently, * or if the bgwriter has been effectively disabled by setting * bgwriter_lru_maxpages to 0.) @@ -3658,7 +3658,7 @@ BgBufferSync(WritebackContext *wb_context) uint32 new_recent_alloc; /* - * Find out where the freelist clock sweep currently is, and how many + * Find out where the freelist clock-sweep currently is, and how many * buffer allocations have happened since our last call. */ strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc); @@ -3679,8 +3679,8 @@ BgBufferSync(WritebackContext *wb_context) /* * Compute strategy_delta = how many buffers have been scanned by the - * clock sweep since last time. If first time through, assume none. Then - * see if we are still ahead of the clock sweep, and if so, how many + * clock-sweep since last time. If first time through, assume none. Then + * see if we are still ahead of the clock-sweep, and if so, how many * buffers we could scan before we'd catch up with it and "lap" it. Note: * weird-looking coding of xxx_passes comparisons are to avoid bogus * behavior when the passes counts wrap around. diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c index 01909be027258..cd94a7d8a7b39 100644 --- a/src/backend/storage/buffer/freelist.c +++ b/src/backend/storage/buffer/freelist.c @@ -33,7 +33,7 @@ typedef struct slock_t buffer_strategy_lock; /* - * Clock sweep hand: index of next buffer to consider grabbing. Note that + * clock-sweep hand: index of next buffer to consider grabbing. Note that * this isn't a concrete buffer - we only ever increase the value. So, to * get an actual buffer, it needs to be used modulo NBuffers. */ @@ -51,7 +51,7 @@ typedef struct * Statistics. 
These counters should be wide enough that they can't * overflow during a single bgwriter cycle. */ - uint32 completePasses; /* Complete cycles of the clock sweep */ + uint32 completePasses; /* Complete cycles of the clock-sweep */ pg_atomic_uint32 numBufferAllocs; /* Buffers allocated since last reset */ /* @@ -311,7 +311,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_r } } - /* Nothing on the freelist, so run the "clock sweep" algorithm */ + /* Nothing on the freelist, so run the "clock-sweep" algorithm */ trycounter = NBuffers; for (;;) { @@ -511,7 +511,7 @@ StrategyInitialize(bool init) StrategyControl->firstFreeBuffer = 0; StrategyControl->lastFreeBuffer = NBuffers - 1; - /* Initialize the clock sweep pointer */ + /* Initialize the clock-sweep pointer */ pg_atomic_init_u32(&StrategyControl->nextVictimBuffer, 0); /* Clear statistics */ @@ -759,7 +759,7 @@ GetBufferFromRing(BufferAccessStrategy strategy, uint32 *buf_state) * * If usage_count is 0 or 1 then the buffer is fair game (we expect 1, * since our own previous usage of the ring element would have left it - * there, but it might've been decremented by clock sweep since then). A + * there, but it might've been decremented by clock-sweep since then). A * higher usage_count indicates someone else has touched the buffer, so we * shouldn't re-use it. */ diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c index 3c0d20f4659d2..04fef13409b02 100644 --- a/src/backend/storage/buffer/localbuf.c +++ b/src/backend/storage/buffer/localbuf.c @@ -229,7 +229,7 @@ GetLocalVictimBuffer(void) ResourceOwnerEnlarge(CurrentResourceOwner); /* - * Need to get a new buffer. We use a clock sweep algorithm (essentially + * Need to get a new buffer. We use a clock-sweep algorithm (essentially * the same as what freelist.c does now...) 
*/ trycounter = NLocBuffer; diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h index 52a71b138f736..3a210c710f633 100644 --- a/src/include/storage/buf_internals.h +++ b/src/include/storage/buf_internals.h @@ -80,8 +80,8 @@ StaticAssertDecl(BUF_REFCOUNT_BITS + BUF_USAGECOUNT_BITS + BUF_FLAG_BITS == 32, * The maximum allowed value of usage_count represents a tradeoff between * accuracy and speed of the clock-sweep buffer management algorithm. A * large value (comparable to NBuffers) would approximate LRU semantics. - * But it can take as many as BM_MAX_USAGE_COUNT+1 complete cycles of - * clock sweeps to find a free buffer, so in practice we don't want the + * But it can take as many as BM_MAX_USAGE_COUNT+1 complete cycles of the + * clock-sweep hand to find a free buffer, so in practice we don't want the * value to be very large. */ #define BM_MAX_USAGE_COUNT 5 From 5f2b9b3d904c2fc85ef3578939a1d82db1e44e14 Mon Sep 17 00:00:00 2001 From: Greg Burd Date: Thu, 10 Jul 2025 14:45:32 -0400 Subject: [PATCH 5/7] Eliminate the freelist from the buffer manager and depend on clock-sweep This set of changes removes the list of available buffers and instead simply uses the clock-sweep algorithm to find and return an available buffer. While on the surface this appears to be removing an optimization it is in fact eliminating code that induces overhead in the form of synchronization that is problematic for multi-core systems. This also removes the have_free_buffer() function and simply caps the pg_autoprewarm process to at most NBuffers.
--- contrib/pg_prewarm/autoprewarm.c | 31 ++++--- src/backend/storage/buffer/README | 40 +++------ src/backend/storage/buffer/buf_init.c | 9 -- src/backend/storage/buffer/bufmgr.c | 29 +------ src/backend/storage/buffer/freelist.c | 119 +------------------------- src/include/storage/buf_internals.h | 12 +-- 6 files changed, 32 insertions(+), 208 deletions(-) diff --git a/contrib/pg_prewarm/autoprewarm.c b/contrib/pg_prewarm/autoprewarm.c index c01b9c7e6a4d6..2722b0bb44375 100644 --- a/contrib/pg_prewarm/autoprewarm.c +++ b/contrib/pg_prewarm/autoprewarm.c @@ -370,6 +370,16 @@ apw_load_buffers(void) apw_state->prewarm_start_idx = apw_state->prewarm_stop_idx = 0; apw_state->prewarmed_blocks = 0; + + /* Don't prewarm more than we can fit. */ + if (num_elements > NBuffers) + { + num_elements = NBuffers; + ereport(LOG, + (errmsg("autoprewarm: capping prewarmed blocks to %d (shared_buffers size)", + NBuffers))); + } + /* Get the info position of the first block of the next database. */ while (apw_state->prewarm_start_idx < num_elements) { @@ -410,10 +420,6 @@ apw_load_buffers(void) apw_state->database = current_db; Assert(apw_state->prewarm_start_idx < apw_state->prewarm_stop_idx); - /* If we've run out of free buffers, don't launch another worker. */ - if (!have_free_buffer()) - break; - /* * Likewise, don't launch if we've already been told to shut down. * (The launch would fail anyway, but we might as well skip it.) @@ -462,12 +468,6 @@ apw_read_stream_next_block(ReadStream *stream, { BlockInfoRecord blk = p->block_info[p->pos]; - if (!have_free_buffer()) - { - p->pos = apw_state->prewarm_stop_idx; - return InvalidBlockNumber; - } - if (blk.tablespace != p->tablespace) return InvalidBlockNumber; @@ -523,10 +523,10 @@ autoprewarm_database_main(Datum main_arg) blk = block_info[i]; /* - * Loop until we run out of blocks to prewarm or until we run out of free + * Loop until we run out of blocks to prewarm or until we run out of * buffers. 
*/ - while (i < apw_state->prewarm_stop_idx && have_free_buffer()) + while (i < apw_state->prewarm_stop_idx) { Oid tablespace = blk.tablespace; RelFileNumber filenumber = blk.filenumber; @@ -568,14 +568,13 @@ autoprewarm_database_main(Datum main_arg) /* * We have a relation; now let's loop until we find a valid fork of - * the relation or we run out of free buffers. Once we've read from - * all valid forks or run out of options, we'll close the relation and + * the relation or we run out of buffers. Once we've read from all + * valid forks or run out of options, we'll close the relation and * move on. */ while (i < apw_state->prewarm_stop_idx && blk.tablespace == tablespace && - blk.filenumber == filenumber && - have_free_buffer()) + blk.filenumber == filenumber) { ForkNumber forknum = blk.forknum; BlockNumber nblocks; diff --git a/src/backend/storage/buffer/README b/src/backend/storage/buffer/README index 4b13da5d7add8..119f31b5d6584 100644 --- a/src/backend/storage/buffer/README +++ b/src/backend/storage/buffer/README @@ -128,11 +128,11 @@ independently. If it is necessary to lock more than one partition at a time, they must be locked in partition-number order to avoid risk of deadlock. * A separate system-wide spinlock, buffer_strategy_lock, provides mutual -exclusion for operations that access the buffer free list or select -buffers for replacement. A spinlock is used here rather than a lightweight -lock for efficiency; no other locks of any sort should be acquired while -buffer_strategy_lock is held. This is essential to allow buffer replacement -to happen in multiple backends with reasonable concurrency. +exclusion for operations that select buffers for replacement. A spinlock is +used here rather than a lightweight lock for efficiency; no other locks of any +sort should be acquired while buffer_strategy_lock is held. This is essential +to allow buffer replacement to happen in multiple backends with reasonable +concurrency. 
* Each buffer header contains a spinlock that must be taken when examining or changing fields of that buffer header. This allows operations such as @@ -158,18 +158,8 @@ unset by sleeping on the buffer's condition variable. Normal Buffer Replacement Strategy ---------------------------------- -There is a "free list" of buffers that are prime candidates for replacement. -In particular, buffers that are completely free (contain no valid page) are -always in this list. We could also throw buffers into this list if we -consider their pages unlikely to be needed soon; however, the current -algorithm never does that. The list is singly-linked using fields in the -buffer headers; we maintain head and tail pointers in global variables. -(Note: although the list links are in the buffer headers, they are -considered to be protected by the buffer_strategy_lock, not the buffer-header -spinlocks.) To choose a victim buffer to recycle when there are no free -buffers available, we use a simple clock-sweep algorithm, which avoids the -need to take system-wide locks during common operations. It works like -this: +To choose a victim buffer to recycle we use a simple clock-sweep algorithm. It +works like this: Each buffer header contains a usage counter, which is incremented (up to a small limit value) whenever the buffer is pinned. (This requires only the @@ -184,20 +174,14 @@ The algorithm for a process that needs to obtain a victim buffer is: 1. Obtain buffer_strategy_lock. -2. If buffer free list is nonempty, remove its head buffer. Release -buffer_strategy_lock. If the buffer is pinned or has a nonzero usage count, -it cannot be used; ignore it go back to step 1. Otherwise, pin the buffer, -and return it. +2. Select the buffer pointed to by nextVictimBuffer, and circularly advance +nextVictimBuffer for next time. Release buffer_strategy_lock. -3. Otherwise, the buffer free list is empty. 
Select the buffer pointed to by -nextVictimBuffer, and circularly advance nextVictimBuffer for next time. -Release buffer_strategy_lock. - -4. If the selected buffer is pinned or has a nonzero usage count, it cannot +3. If the selected buffer is pinned or has a nonzero usage count, it cannot be used. Decrement its usage count (if nonzero), reacquire buffer_strategy_lock, and return to step 3 to examine the next buffer. -5. Pin the selected buffer, and return. +4. Pin the selected buffer, and return. (Note that if the selected buffer is dirty, we will have to write it out before we can recycle it; if someone else pins the buffer meanwhile we will @@ -234,7 +218,7 @@ the ring strategy effectively degrades to the normal strategy. VACUUM uses a ring like sequential scans, however, the size of this ring is controlled by the vacuum_buffer_usage_limit GUC. Dirty pages are not removed -from the ring. Instead, WAL is flushed if needed to allow reuse of the +from the ring. Instead, the WAL is flushed if needed to allow reuse of the buffers. Before introducing the buffer ring strategy in 8.3, VACUUM's buffers were sent to the freelist, which was effectively a buffer ring of 1 buffer, resulting in excessive WAL flushing. diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c index ed1dc488a42b4..6fd3a6bbac5ea 100644 --- a/src/backend/storage/buffer/buf_init.c +++ b/src/backend/storage/buffer/buf_init.c @@ -128,20 +128,11 @@ BufferManagerShmemInit(void) pgaio_wref_clear(&buf->io_wref); - /* - * Initially link all the buffers together as unused. Subsequent - * management of this list is done by freelist.c. 
- */ - buf->freeNext = i + 1; - LWLockInitialize(BufferDescriptorGetContentLock(buf), LWTRANCHE_BUFFER_CONTENT); ConditionVariableInit(BufferDescriptorGetIOCV(buf)); } - - /* Correct last entry of linked list */ - GetBufferDescriptor(NBuffers - 1)->freeNext = FREENEXT_END_OF_LIST; } /* Init other shared buffer-management stuff */ diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 396b053b3faf3..719a5bb6f97ea 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -2094,12 +2094,6 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, */ UnpinBuffer(victim_buf_hdr); - /* - * The victim buffer we acquired previously is clean and unused, let - * it be found again quickly - */ - StrategyFreeBuffer(victim_buf_hdr); - /* remaining code should match code at top of routine */ existing_buf_hdr = GetBufferDescriptor(existing_buf_id); @@ -2158,8 +2152,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, } /* - * InvalidateBuffer -- mark a shared buffer invalid and return it to the - * freelist. + * InvalidateBuffer -- mark a shared buffer invalid. * * The buffer header spinlock must be held at entry. We drop it before * returning. (This is sane because the caller must have locked the @@ -2257,11 +2250,6 @@ InvalidateBuffer(BufferDesc *buf) * Done with mapping lock. */ LWLockRelease(oldPartitionLock); - - /* - * Insert the buffer at the head of the list of free buffers. 
- */ - StrategyFreeBuffer(buf); } /* @@ -2679,11 +2667,6 @@ ExtendBufferedRelShared(BufferManagerRelation bmr, { BufferDesc *buf_hdr = GetBufferDescriptor(buffers[i] - 1); - /* - * The victim buffer we acquired previously is clean and unused, - * let it be found again quickly - */ - StrategyFreeBuffer(buf_hdr); UnpinBuffer(buf_hdr); } @@ -2756,12 +2739,6 @@ ExtendBufferedRelShared(BufferManagerRelation bmr, valid = PinBuffer(existing_hdr, strategy); LWLockRelease(partition_lock); - - /* - * The victim buffer we acquired previously is clean and unused, - * let it be found again quickly - */ - StrategyFreeBuffer(victim_buf_hdr); UnpinBuffer(victim_buf_hdr); buffers[i] = BufferDescriptorGetBuffer(existing_hdr); @@ -3658,8 +3635,8 @@ BgBufferSync(WritebackContext *wb_context) uint32 new_recent_alloc; /* - * Find out where the freelist clock-sweep currently is, and how many - * buffer allocations have happened since our last call. + * Find out where the clock-sweep currently is, and how many buffer + * allocations have happened since our last call. */ strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc); diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c index cd94a7d8a7b39..7d59a92bd1a88 100644 --- a/src/backend/storage/buffer/freelist.c +++ b/src/backend/storage/buffer/freelist.c @@ -39,14 +39,6 @@ typedef struct */ pg_atomic_uint32 nextVictimBuffer; - int firstFreeBuffer; /* Head of list of unused buffers */ - int lastFreeBuffer; /* Tail of list of unused buffers */ - - /* - * NOTE: lastFreeBuffer is undefined when firstFreeBuffer is -1 (that is, - * when the list is empty) - */ - /* * Statistics. These counters should be wide enough that they can't * overflow during a single bgwriter cycle. @@ -163,23 +155,6 @@ ClockSweepTick(void) return victim; } -/* - * have_free_buffer -- a lockless check to see if there is a free buffer in - * buffer pool. 
- * - * If the result is true that will become stale once free buffers are moved out - * by other operations, so the caller who strictly want to use a free buffer - * should not call this. - */ -bool -have_free_buffer(void) -{ - if (StrategyControl->firstFreeBuffer >= 0) - return true; - else - return false; -} - /* * StrategyGetBuffer * @@ -249,69 +224,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_r */ pg_atomic_fetch_add_u32(&StrategyControl->numBufferAllocs, 1); - /* - * First check, without acquiring the lock, whether there's buffers in the - * freelist. Since we otherwise don't require the spinlock in every - * StrategyGetBuffer() invocation, it'd be sad to acquire it here - - * uselessly in most cases. That obviously leaves a race where a buffer is - * put on the freelist but we don't see the store yet - but that's pretty - * harmless, it'll just get used during the next buffer acquisition. - * - * If there's buffers on the freelist, acquire the spinlock to pop one - * buffer of the freelist. Then check whether that buffer is usable and - * repeat if not. - * - * Note that the freeNext fields are considered to be protected by the - * buffer_strategy_lock not the individual buffer spinlocks, so it's OK to - * manipulate them without holding the spinlock. 
- */ - if (StrategyControl->firstFreeBuffer >= 0) - { - while (true) - { - /* Acquire the spinlock to remove element from the freelist */ - SpinLockAcquire(&StrategyControl->buffer_strategy_lock); - - if (StrategyControl->firstFreeBuffer < 0) - { - SpinLockRelease(&StrategyControl->buffer_strategy_lock); - break; - } - - buf = GetBufferDescriptor(StrategyControl->firstFreeBuffer); - Assert(buf->freeNext != FREENEXT_NOT_IN_LIST); - - /* Unconditionally remove buffer from freelist */ - StrategyControl->firstFreeBuffer = buf->freeNext; - buf->freeNext = FREENEXT_NOT_IN_LIST; - - /* - * Release the lock so someone else can access the freelist while - * we check out this buffer. - */ - SpinLockRelease(&StrategyControl->buffer_strategy_lock); - - /* - * If the buffer is pinned or has a nonzero usage_count, we cannot - * use it; discard it and retry. (This can only happen if VACUUM - * put a valid buffer in the freelist and then someone else used - * it before we got to it. It's probably impossible altogether as - * of 8.3, but we'd better check anyway.) - */ - local_buf_state = LockBufHdr(buf); - if (BUF_STATE_GET_REFCOUNT(local_buf_state) == 0 - && BUF_STATE_GET_USAGECOUNT(local_buf_state) == 0) - { - if (strategy != NULL) - AddBufferToRing(strategy, buf); - *buf_state = local_buf_state; - return buf; - } - UnlockBufHdr(buf, local_buf_state); - } - } - - /* Nothing on the freelist, so run the "clock-sweep" algorithm */ + /* Use the "clock-sweep" algorithm to find a free buffer */ trycounter = NBuffers; for (;;) { @@ -356,29 +269,6 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_r } } -/* - * StrategyFreeBuffer: put a buffer on the freelist - */ -void -StrategyFreeBuffer(BufferDesc *buf) -{ - SpinLockAcquire(&StrategyControl->buffer_strategy_lock); - - /* - * It is possible that we are told to put something in the freelist that - * is already in it; don't screw up the list if so.
- */ - if (buf->freeNext == FREENEXT_NOT_IN_LIST) - { - buf->freeNext = StrategyControl->firstFreeBuffer; - if (buf->freeNext < 0) - StrategyControl->lastFreeBuffer = buf->buf_id; - StrategyControl->firstFreeBuffer = buf->buf_id; - } - - SpinLockRelease(&StrategyControl->buffer_strategy_lock); -} - /* * StrategySyncStart -- tell BgBufferSync where to start syncing * @@ -504,13 +394,6 @@ StrategyInitialize(bool init) SpinLockInit(&StrategyControl->buffer_strategy_lock); - /* - * Grab the whole linked list of free buffers for our strategy. We - * assume it was previously set up by BufferManagerShmemInit(). - */ - StrategyControl->firstFreeBuffer = 0; - StrategyControl->lastFreeBuffer = NBuffers - 1; - /* Initialize the clock-sweep pointer */ pg_atomic_init_u32(&StrategyControl->nextVictimBuffer, 0); diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h index 3a210c710f633..9fcc94ef02dff 100644 --- a/src/include/storage/buf_internals.h +++ b/src/include/storage/buf_internals.h @@ -217,8 +217,7 @@ BufMappingPartitionLockByIndex(uint32 index) * single atomic variable. This layout allow us to do some operations in a * single atomic operation, without actually acquiring and releasing spinlock; * for instance, increase or decrease refcount. buf_id field never changes - * after initialization, so does not need locking. freeNext is protected by - * the buffer_strategy_lock not buffer header lock. The LWLock can take care + * after initialization, so does not need locking. The LWLock can take care * of itself. The buffer header lock is *not* used to control access to the * data in the buffer! 
* @@ -264,7 +263,6 @@ typedef struct BufferDesc pg_atomic_uint32 state; int wait_backend_pgprocno; /* backend of pin-count waiter */ - int freeNext; /* link in freelist chain */ PgAioWaitRef io_wref; /* set iff AIO is in progress */ LWLock content_lock; /* to lock access to buffer contents */ @@ -360,13 +358,6 @@ BufferDescriptorGetContentLock(const BufferDesc *bdesc) return (LWLock *) (&bdesc->content_lock); } -/* - * The freeNext field is either the index of the next freelist entry, - * or one of these special values: - */ -#define FREENEXT_END_OF_LIST (-1) -#define FREENEXT_NOT_IN_LIST (-2) - /* * Functions for acquiring/releasing a shared buffer header's spinlock. Do * not apply these to local buffers! @@ -453,7 +444,6 @@ extern void StrategyNotifyBgWriter(int bgwprocno); extern Size StrategyShmemSize(void); extern void StrategyInitialize(bool init); -extern bool have_free_buffer(void); /* buf_table.c */ extern Size BufTableShmemSize(int size); From a085c5008973af90cf75f499d2733328d81d383e Mon Sep 17 00:00:00 2001 From: Greg Burd Date: Tue, 12 Aug 2025 09:33:38 -0400 Subject: [PATCH 6/7] Remove the need for a buffer_strategy_lock Combine nextVictimBuffer and completePasses into a single uint64 called clockSweepCounter so as to eliminate the need for the buffer_strategy_lock that synchronizes their progression. Increment this counter atomically by 1 at each tick. The hand's location is the counter modulo NBuffers, the number of complete passes is the counter divided by NBuffers. Overflow of the clockSweepCounter would require ~10 years of continuous operation at ~59 billion ticks per-second, so unlikely that we consider this to be impossible. 
--- src/backend/storage/buffer/README | 70 +++++++------ src/backend/storage/buffer/bufmgr.c | 8 ++ src/backend/storage/buffer/freelist.c | 135 ++++++++------------------ src/include/storage/buf_internals.h | 2 +- 4 files changed, 86 insertions(+), 129 deletions(-) diff --git a/src/backend/storage/buffer/README b/src/backend/storage/buffer/README index 119f31b5d6584..52d5b2c4069aa 100644 --- a/src/backend/storage/buffer/README +++ b/src/backend/storage/buffer/README @@ -127,11 +127,10 @@ bits of the tag's hash value. The rules stated above apply to each partition independently. If it is necessary to lock more than one partition at a time, they must be locked in partition-number order to avoid risk of deadlock. -* A separate system-wide spinlock, buffer_strategy_lock, provides mutual -exclusion for operations that select buffers for replacement. A spinlock is -used here rather than a lightweight lock for efficiency; no other locks of any -sort should be acquired while buffer_strategy_lock is held. This is essential -to allow buffer replacement to happen in multiple backends with reasonable +* Operations that select buffers for replacement don't require a lock, but +rather use atomic operations to ensure coordination across backends when +accessing members of the BufferStrategyControl data structure. This allows +buffer replacement to happen in multiple backends with reasonable concurrency. * Each buffer header contains a spinlock that must be taken when examining @@ -158,30 +157,39 @@ unset by sleeping on the buffer's condition variable. Normal Buffer Replacement Strategy ---------------------------------- -To choose a victim buffer to recycle we use a simple clock-sweep algorithm. It -works like this: +To choose a victim buffer to recycle we use a simple clock-sweep algorithm; +done carefully, this can be accomplished without the need to take a system-wide +lock. 
The strategy works like this: + +The "clock hand" is a buffer index that conceptually moves circularly through +all the available buffers in the range of 0 to NBuffers-1. Each time the hand +returns to 0 marks a "complete pass" of the buffers managed by the clock. The +hand progresses one "tick" at a time around the clock identifying a potential +"victim". These two values, the hand's position and the number of complete +passes, must be consistent across backends. + +In this implementation the hand's position on the clock is determined by the +value of clockSweepCounter modulo the value of NBuffers. The value of +clockSweepCounter, a uint64, is atomically incremented by 1 at each tick of the +clock. The number of complete passes is therefore the clockSweepCounter divided +by NBuffers. This reduces the coordination across backends to either an atomic +read or an atomic fetch-add. Each buffer header contains a usage counter, which is incremented (up to a small limit value) whenever the buffer is pinned. (This requires only the buffer header spinlock, which would have to be taken anyway to increment the buffer reference count, so it's nearly free.) -The "clock hand" is a buffer index, nextVictimBuffer, that moves circularly -through all the available buffers. nextVictimBuffer is protected by the -buffer_strategy_lock. - The algorithm for a process that needs to obtain a victim buffer is: -1. Obtain buffer_strategy_lock. - -2. Select the buffer pointed to by nextVictimBuffer, and circularly advance -nextVictimBuffer for next time. Release buffer_strategy_lock. +1. Move around the clock one tick. Atomically read and advance +clockSweepCounter by 1 and return its previous value modulo NBuffers. -3. If the selected buffer is pinned or has a nonzero usage count, it cannot -be used. Decrement its usage count (if nonzero), reacquire -buffer_strategy_lock, and return to step 3 to examine the next buffer. +2. 
If the selected buffer is pinned or has a nonzero usage count, it cannot be +used. Decrement its usage count (if nonzero), return to step 1 to examine the +next buffer. -4. Pin the selected buffer, and return. +3. Pin the selected buffer, and return. (Note that if the selected buffer is dirty, we will have to write it out before we can recycle it; if someone else pins the buffer meanwhile we will have to give up and try another buffer. This however is not a concern of the basic select-a-victim-buffer algorithm.) @@ -237,19 +245,17 @@ Background Writer's Processing ------------------------------ The background writer is designed to write out pages that are likely to be -recycled soon, thereby offloading the writing work from active backends. -To do this, it scans forward circularly from the current position of -nextVictimBuffer (which it does not change!), looking for buffers that are -dirty and not pinned nor marked with a positive usage count. It pins, -writes, and releases any such buffer. - -If we can assume that reading nextVictimBuffer is an atomic action, then -the writer doesn't even need to take buffer_strategy_lock in order to look -for buffers to write; it needs only to spinlock each buffer header for long -enough to check the dirtybit. Even without that assumption, the writer -only needs to take the lock long enough to read the variable value, not -while scanning the buffers. (This is a very substantial improvement in -the contention cost of the writer compared to PG 8.0.) +recycled soon, thereby offloading the writing work from active backends. To do +this, it scans forward circularly from the current position of the clock-sweep +hand (read atomically and not modified), looking for buffers that are dirty and +not pinned nor marked with a positive usage count. It pins, writes, and +releases any such buffer. + +It only needs to spinlock each buffer header for long enough to check the +dirtybit; the writer never holds any lock while scanning the buffers, only +for long enough to read each buffer's state. 
(This +is a very substantial improvement in the contention cost of the writer compared +to PG 8.0.) The background writer takes shared content lock on a buffer while writing it out (and anyone else who flushes buffer contents to disk must do so too). diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 719a5bb6f97ea..d0c14158115c1 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -3666,6 +3666,14 @@ BgBufferSync(WritebackContext *wb_context) { int32 passes_delta = strategy_passes - prev_strategy_passes; + /* + * It would take ~10 years of continuous operation at ~59 billion + * clock ticks per-second to overflow the uint64 value of + * clockSweepCounter. We consider this impossible and memorialize that + * decision with this assert. + */ + Assert(prev_strategy_passes <= strategy_passes); + strategy_delta = strategy_buf_id - prev_strategy_buf_id; strategy_delta += (long) passes_delta * NBuffers; diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c index 7d59a92bd1a88..7d68f2227b3cc 100644 --- a/src/backend/storage/buffer/freelist.c +++ b/src/backend/storage/buffer/freelist.c @@ -29,21 +29,18 @@ */ typedef struct { - /* Spinlock: protects the values below */ - slock_t buffer_strategy_lock; - /* - * clock-sweep hand: index of next buffer to consider grabbing. Note that - * this isn't a concrete buffer - we only ever increase the value. So, to - * get an actual buffer, it needs to be used modulo NBuffers. + * The clock-sweep counter is atomically updated by 1 at every tick. Use + * the macro CLOCKSWEEP_HAND() to find the location of the hand on the + * clock. Use CLOCKSWEEP_PASSES() to calculate the number of times the + * clock-sweep hand has made a complete pass around the clock. */ - pg_atomic_uint32 nextVictimBuffer; + pg_atomic_uint64 clockSweepCounter; /* * Statistics. 
These counters should be wide enough that they can't * overflow during a single bgwriter cycle. */ - uint32 completePasses; /* Complete cycles of the clock-sweep */ pg_atomic_uint32 numBufferAllocs; /* Buffers allocated since last reset */ /* @@ -83,76 +80,47 @@ typedef struct BufferAccessStrategyData Buffer buffers[FLEXIBLE_ARRAY_MEMBER]; } BufferAccessStrategyData; - /* Prototypes for internal functions */ static BufferDesc *GetBufferFromRing(BufferAccessStrategy strategy, uint32 *buf_state); static void AddBufferToRing(BufferAccessStrategy strategy, BufferDesc *buf); +/* + * The clock-sweep counter is a uint64 but the clock hand can never be larger + * than a uint32. Enforce that contract uniformly using this macro. + */ +#define CLOCKSWEEP_HAND(counter) \ ((uint32) (counter)) % NBuffers + +/* + * The number of times the clock hand has made a complete pass around the clock + * visiting all the available buffers is the counter divided by NBuffers. + */ +#define CLOCKSWEEP_PASSES(counter) \ (uint32) ((counter) / NBuffers) + /* * ClockSweepTick - Helper routine for StrategyGetBuffer() * * Move the clock hand one buffer ahead of its current position and return the - * id of the buffer now under the hand. + * index of the buffer previously under the hand. */ static inline uint32 ClockSweepTick(void) { - uint32 victim; + uint64 counter; + uint32 hand; /* * Atomically move hand ahead one buffer - if there's several processes * doing this, this can lead to buffers being returned slightly out of * apparent order. */ - victim = - pg_atomic_fetch_add_u32(&StrategyControl->nextVictimBuffer, 1); - - if (victim >= NBuffers) - { - uint32 originalVictim = victim; + counter = pg_atomic_fetch_add_u64(&StrategyControl->clockSweepCounter, 1); - /* always wrap what we look up in BufferDescriptors */ - victim = victim % NBuffers; - - /* - * If we're the one that just caused a wraparound, force - * completePasses to be incremented while holding the spinlock. 
We - * need the spinlock so StrategySyncStart() can return a consistent - * value consisting of nextVictimBuffer and completePasses. - */ - if (victim == 0) - { - uint32 expected; - uint32 wrapped; - bool success = false; - - expected = originalVictim + 1; - - while (!success) - { - /* - * Acquire the spinlock while increasing completePasses. That - * allows other readers to read nextVictimBuffer and - * completePasses in a consistent manner which is required for - * StrategySyncStart(). In theory delaying the increment - * could lead to an overflow of nextVictimBuffers, but that's - * highly unlikely and wouldn't be particularly harmful. - */ - SpinLockAcquire(&StrategyControl->buffer_strategy_lock); + hand = CLOCKSWEEP_HAND(counter); + Assert(hand < NBuffers); - wrapped = expected % NBuffers; - - success = pg_atomic_compare_exchange_u32(&StrategyControl->nextVictimBuffer, - &expected, wrapped); - if (success) - StrategyControl->completePasses++; - SpinLockRelease(&StrategyControl->buffer_strategy_lock); - } - } - } - return victim; + return hand; } /* @@ -177,10 +145,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_r *from_ring = false; - /* - * If given a strategy object, see whether it can select a buffer. We - * assume strategy objects don't need buffer_strategy_lock. - */ + /* If given a strategy object, see whether it can select a buffer */ if (strategy != NULL) { buf = GetBufferFromRing(strategy, buf_state); @@ -275,37 +240,25 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state, bool *from_r * The result is the buffer index of the best buffer to sync first. * BgBufferSync() will proceed circularly around the buffer array from there. * - * In addition, we return the completed-pass count (which is effectively - * the higher-order bits of nextVictimBuffer) and the count of recent buffer - * allocs if non-NULL pointers are passed. The alloc count is reset after - * being read. 
+ * In addition, we return the completed-pass count and the count of recent + * buffer allocs if non-NULL pointers are passed. The alloc count is reset + * after being read. */ -int +uint32 StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc) { - uint32 nextVictimBuffer; - int result; + uint64 counter; + uint32 result; - SpinLockAcquire(&StrategyControl->buffer_strategy_lock); - nextVictimBuffer = pg_atomic_read_u32(&StrategyControl->nextVictimBuffer); - result = nextVictimBuffer % NBuffers; + counter = pg_atomic_read_u64(&StrategyControl->clockSweepCounter); + result = CLOCKSWEEP_HAND(counter); if (complete_passes) - { - *complete_passes = StrategyControl->completePasses; - - /* - * Additionally add the number of wraparounds that happened before - * completePasses could be incremented. C.f. ClockSweepTick(). - */ - *complete_passes += nextVictimBuffer / NBuffers; - } + *complete_passes = CLOCKSWEEP_PASSES(counter); if (num_buf_alloc) - { *num_buf_alloc = pg_atomic_exchange_u32(&StrategyControl->numBufferAllocs, 0); - } - SpinLockRelease(&StrategyControl->buffer_strategy_lock); + return result; } @@ -320,14 +273,7 @@ StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc) void StrategyNotifyBgWriter(int bgwprocno) { - /* - * We acquire buffer_strategy_lock just to ensure that the store appears - * atomic to StrategyGetBuffer. The bgwriter should call this rather - * infrequently, so there's no performance penalty from being safe. 
- */ - SpinLockAcquire(&StrategyControl->buffer_strategy_lock); StrategyControl->bgwprocno = bgwprocno; - SpinLockRelease(&StrategyControl->buffer_strategy_lock); } @@ -392,13 +338,10 @@ StrategyInitialize(bool init) */ Assert(init); - SpinLockInit(&StrategyControl->buffer_strategy_lock); - - /* Initialize the clock-sweep pointer */ - pg_atomic_init_u32(&StrategyControl->nextVictimBuffer, 0); + /* Initialize combined clock-sweep pointer/complete passes counter */ + pg_atomic_init_u64(&StrategyControl->clockSweepCounter, 0); /* Clear statistics */ - StrategyControl->completePasses = 0; pg_atomic_init_u32(&StrategyControl->numBufferAllocs, 0); /* No pending notification */ @@ -714,9 +657,9 @@ IOContextForStrategy(BufferAccessStrategy strategy) * When a nondefault strategy is used, the buffer manager calls this function * when it turns out that the buffer selected by StrategyGetBuffer needs to * be written out and doing so would require flushing WAL too. This gives us - * a chance to choose a different victim. + * a chance to choose a different buffer. * - * Returns true if buffer manager should ask for a new victim, and false + * Returns true if buffer manager should ask for a new buffer, and false * if this buffer should be written and re-used. 
 */ bool diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h index 9fcc94ef02dff..b6ff361f2dd50 100644 --- a/src/include/storage/buf_internals.h +++ b/src/include/storage/buf_internals.h @@ -439,7 +439,7 @@ extern void StrategyFreeBuffer(BufferDesc *buf); extern bool StrategyRejectBuffer(BufferAccessStrategy strategy, BufferDesc *buf, bool from_ring); -extern int StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc); +extern uint32 StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc); extern void StrategyNotifyBgWriter(int bgwprocno); extern Size StrategyShmemSize(void); From 47fe068a3f7d7bc08c1677aee503272f99b8d6ed Mon Sep 17 00:00:00 2001 From: Greg Burd Date: Tue, 12 Aug 2025 10:58:52 -0400 Subject: [PATCH 7/7] Optimize modulo and division used in clock-sweep algorithm Improve the performance of the buffer manager by replacing the modulo and division operations with a technique described in the paper "Division by Invariant Integers using Multiplication" [1]. Our implementation is inspired by the MIT Licensed "fastdiv" [2]. This algorithm provides accurate division and modulo in constant time that is pipeline and ALU friendly and estimated to take about ~12-18 cycles (vs 26-90 for hardware division). Because our divisor (NBuffers) is fixed at startup, we need only calculate the constant used by it once. [1] https://gmplib.org/~tege/divcnst-pldi94.pdf [2] https://github.com/jmtilli/fastdiv --- src/backend/storage/buffer/freelist.c | 106 ++++++++++++++++++++++++-- 1 file changed, 98 insertions(+), 8 deletions(-) diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c index 7d68f2227b3cc..96ae21fb1529a 100644 --- a/src/backend/storage/buffer/freelist.c +++ b/src/backend/storage/buffer/freelist.c @@ -31,12 +31,28 @@ typedef struct { /* * The clock-sweep counter is atomically updated by 1 at every tick. 
Use - * the macro CLOCKSWEEP_HAND() to find the location of the hand on the - * clock. Use CLOCKSWEEP_PASSES() to calculate the number of times the + * the function ClockSweepHand() to find the location of the hand on the + * clock. Use ClockSweepPasses() to calculate the number of times the * clock-sweep hand has made a complete pass around the clock. */ pg_atomic_uint64 clockSweepCounter; + /* + * Division and modulo can be expensive to calculate repeatedly. Given + * that the buffer manager is a very hot code path we implement a more + * efficient method based on using "Division by invariant Integers using + * Multiplication" (https://gmplib.org/~tege/divcnst-pldi94.pdf) by + * Granlund-Montgomery. Our implementation below was inspired by the MIT + * Licensed "fastdiv" (https://github.com/jmtilli/fastdiv). + */ + struct + { + uint32 mul; + uint32 mod; + uint8 shift1:1; + uint8 shift2:7; + } md; + /* * Statistics. These counters should be wide enough that they can't * overflow during a single bgwriter cycle. @@ -86,17 +102,75 @@ static BufferDesc *GetBufferFromRing(BufferAccessStrategy strategy, static void AddBufferToRing(BufferAccessStrategy strategy, BufferDesc *buf); +static inline uint32 +InvariantDivision(uint64 n) +{ + /* Compute quotient using multiplication */ + uint64 product = n * StrategyControl->md.mul; + uint32 quotient = (uint32) (product >> 32); + + /* + * The invariant multiplication gives us an approximation that may be off + * by 1. + */ + n -= quotient; + n >>= StrategyControl->md.shift1; + n += quotient; + n >>= StrategyControl->md.shift2; + + return n; +} + +static inline uint32 +InvariantModulo(uint64 n) +{ + /* Compute quotient using multiplication */ + uint64 product = n * StrategyControl->md.mul; + uint32 quotient = (uint32) (product >> 32); + uint32 on = n; + + /* + * The invariant multiplication gives us an approximation that may be off + * by 1. 
+ */ + n -= quotient; + n >>= StrategyControl->md.shift1; + n += quotient; + n >>= StrategyControl->md.shift2; + + quotient = StrategyControl->md.mod * n; + return on - quotient; +} + /* * The clock-sweep counter is a uint64 but the clock hand can never be larger - * than a uint32. Enforce that contract uniformly using this macro. + * than a uint32. */ -#define CLOCKSWEEP_HAND(counter) \ ((uint32) (counter)) % NBuffers +static inline uint32 +ClockSweepHand(uint64 counter) +{ + uint32 result = InvariantModulo(counter); + + Assert(result < NBuffers); + Assert(result == (uint32) counter % NBuffers); + + return result; +} /* * The number of times the clock hand has made a complete pass around the clock * visiting all the available buffers is the counter divided by NBuffers. */ -#define CLOCKSWEEP_PASSES(counter) \ (uint32) ((counter) / NBuffers) +static inline uint32 +ClockSweepPasses(uint64 counter) +{ + uint32 result = InvariantDivision(counter); + + /* Verify our result matches standard division */ + Assert(result == (uint32) (counter / NBuffers)); + + return result; +} /* * ClockSweepTick - Helper routine for StrategyGetBuffer() @@ -117,7 +191,7 @@ ClockSweepTick(void) */ counter = pg_atomic_fetch_add_u64(&StrategyControl->clockSweepCounter, 1); - hand = CLOCKSWEEP_HAND(counter); + hand = ClockSweepHand(counter); Assert(hand < NBuffers); return hand; @@ -251,10 +325,10 @@ StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc) uint32 result; counter = pg_atomic_read_u64(&StrategyControl->clockSweepCounter); - result = CLOCKSWEEP_HAND(counter); + result = ClockSweepHand(counter); if (complete_passes) - *complete_passes = CLOCKSWEEP_PASSES(counter); + *complete_passes = ClockSweepPasses(counter); if (num_buf_alloc) *num_buf_alloc = pg_atomic_exchange_u32(&StrategyControl->numBufferAllocs, 0); @@ -333,11 +407,27 @@ StrategyInitialize(bool init) if (!found) { + uint8 shift2 = 0; + uint32 divisor = NBuffers; + uint8 is_pow2 = (divisor & (divisor - 1)) == 0 ? 
0 : 1; + /* * Only done once, usually in postmaster */ Assert(init); + /* Calculate the constants used for speeding up division and modulo */ + Assert(NBuffers > 0 && NBuffers < (1U << 31)); + + /* shift2 = ilog(NBuffers) */ + for (uint32 n = divisor; n >>= 1;) + shift2++; + + StrategyControl->md.shift1 = is_pow2; + StrategyControl->md.shift2 = shift2; + StrategyControl->md.mod = NBuffers; + StrategyControl->md.mul = (1ULL << (32 + is_pow2 + shift2)) / NBuffers + 1; + /* Initialize combined clock-sweep pointer/complete passes counter */ pg_atomic_init_u64(&StrategyControl->clockSweepCounter, 0);