diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 9c4b65f3c273..e7081c444c5f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -38,6 +38,45 @@ jobs:
minimal: true
ghc: true
+ - name: Debug R2 upload test
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.R2_METRICS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.R2_METRICS_SECRET_ACCESS_KEY }}
+ R2_ENDPOINT: ${{ secrets.R2_METRICS_ENDPOINT }}
+ R2_PUBLIC_URL: ${{ vars.R2_METRICS_PUBLIC_URL }}
+ R2_BUCKET: ${{ vars.R2_METRICS_BUCKET_NAME }}
+ shell: devx {0}
+ run: |
+ echo "Testing R2 upload connectivity..."
+ echo "AWS_ACCESS_KEY_ID length: ${#AWS_ACCESS_KEY_ID}"
+ echo "AWS_SECRET_ACCESS_KEY length: ${#AWS_SECRET_ACCESS_KEY}"
+ echo "R2_ENDPOINT length: ${#R2_ENDPOINT}"
+ echo "R2_BUCKET length: ${#R2_BUCKET}"
+
+ # Hash secrets for comparison (without revealing them)
+ echo "Hash comparison (expected vs actual):"
+ echo "ACCESS_KEY_ID: 5ca6a5e1... vs $(echo "${AWS_ACCESS_KEY_ID}" | tr -d '\n' | sha256sum | cut -c1-8)..."
+ echo "SECRET_ACCESS_KEY: 9c22a347... vs $(echo "${AWS_SECRET_ACCESS_KEY}" | tr -d '\n' | sha256sum | cut -c1-8)..."
+ echo "R2_ENDPOINT: e2178c75... vs $(echo "${R2_ENDPOINT}" | tr -d '\n' | sha256sum | cut -c1-8)..."
+ echo "R2_BUCKET: 617649ec... vs $(echo "${R2_BUCKET}" | tr -d '\n' | sha256sum | cut -c1-8)..."
+
+ # Create test file
+ echo "test upload $(date)" > /tmp/r2-test.txt
+
+          # Try upload (R2 requires the region to be "auto", supplied here via
+          # AWS_DEFAULT_REGION; the endpoint comes from AWS_ENDPOINT_URL, the bucket from a repo variable)
+ export AWS_ENDPOINT_URL="${R2_ENDPOINT}"
+ export AWS_DEFAULT_REGION=auto
+          # Check the result directly rather than via $?, which is unreliable
+          # if the shell runs with errexit
+          if nix-shell --keep AWS_ACCESS_KEY_ID --keep AWS_SECRET_ACCESS_KEY --keep AWS_ENDPOINT_URL --keep AWS_DEFAULT_REGION -p awscli2 --run "aws s3 cp /tmp/r2-test.txt s3://${R2_BUCKET}/test/r2-test.txt"; then
+            echo "SUCCESS: R2 upload works!"
+            echo "Public URL would be: ${R2_PUBLIC_URL}/test/r2-test.txt"
+          else
+            echo "FAILED: R2 upload did not work"
+          fi
+
- name: Update hackage
shell: devx {0}
run: cabal update
@@ -50,19 +89,130 @@ jobs:
# shell: devx {0}
# run: ./configure
+ - name: Start metrics collection
+ shell: devx {0}
+ run: ./mk/collect-metrics.sh start _build/metrics 0.5
+
- name: Build the bindist
shell: devx {0}
- run: make CABAL=$PWD/_build/stage0/bin/cabal
+ run: make QUIET=1 CABAL=$PWD/_build/stage0/bin/cabal
- - name: Upload artifacts
+ - name: Display build timings
+ if: ${{ !cancelled() }}
+ shell: devx {0}
+ run: make timing-summary
+
+ - name: Upload bindist artifacts
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.plat }}-bindist
path: _build/bindist
+ - name: Upload build logs and timing
+ uses: actions/upload-artifact@v4
+ if: ${{ !cancelled() }} # upload logs even if the build failed
+ with:
+ name: ${{ matrix.plat }}-build-logs
+ path: |
+ _build/logs/
+ _build/timing/
+
- name: Run the testsuite
shell: devx {0}
- run: make test CABAL=$PWD/_build/stage0/bin/cabal
+ run: make test QUIET=1 CABAL=$PWD/_build/stage0/bin/cabal
+
+ - name: Stop metrics collection
+ if: ${{ !cancelled() }}
+ shell: devx {0}
+ run: ./mk/collect-metrics.sh stop _build/metrics
+
+ - name: Display test timings
+ if: ${{ !cancelled() }}
+ shell: devx {0}
+ run: make timing-summary
+
+ - name: Generate metrics plots
+ if: ${{ !cancelled() }}
+ continue-on-error: true
+ shell: devx {0}
+ run: |
+ # Generate separate build and test SVG plots (wider aspect ratio)
+ nix-shell -p 'python3.withPackages (ps: [ps.matplotlib])' --run 'python3 ./mk/plot-metrics.py _build/metrics _build/timing _build/metrics/metrics'
+
+ - name: Upload metrics plots to R2
+ if: ${{ !cancelled() }}
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.R2_METRICS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.R2_METRICS_SECRET_ACCESS_KEY }}
+ R2_ENDPOINT: ${{ secrets.R2_METRICS_ENDPOINT }}
+ R2_PUBLIC_URL: ${{ vars.R2_METRICS_PUBLIC_URL }}
+ R2_BUCKET: ${{ vars.R2_METRICS_BUCKET_NAME }}
+ shell: devx {0}
+ run: |
+ RUN_ID="${{ github.run_id }}"
+ PLAT="${{ matrix.plat }}"
+
+ export AWS_ENDPOINT_URL="${R2_ENDPOINT}"
+ export AWS_DEFAULT_REGION=auto
+
+          # Upload build plot if it exists; only record the URL if the upload succeeded,
+          # so the summary step never embeds a broken image
+          if [[ -f "_build/metrics/metrics-build.svg" ]]; then
+            BUILD_PATH="runs/${RUN_ID}/${PLAT}-build.svg"
+            if nix-shell --keep AWS_ACCESS_KEY_ID --keep AWS_SECRET_ACCESS_KEY --keep AWS_ENDPOINT_URL --keep AWS_DEFAULT_REGION -p awscli2 --run "aws s3 cp _build/metrics/metrics-build.svg s3://${R2_BUCKET}/${BUILD_PATH} --content-type 'image/svg+xml'"; then
+              echo "BUILD_PLOT_URL=${R2_PUBLIC_URL}/${BUILD_PATH}" >> $GITHUB_ENV
+            else
+              echo "Failed to upload build plot (non-fatal)"
+            fi
+          fi
+
+          # Upload test plot if it exists; only record the URL if the upload succeeded
+          if [[ -f "_build/metrics/metrics-test.svg" ]]; then
+            TEST_PATH="runs/${RUN_ID}/${PLAT}-test.svg"
+            if nix-shell --keep AWS_ACCESS_KEY_ID --keep AWS_SECRET_ACCESS_KEY --keep AWS_ENDPOINT_URL --keep AWS_DEFAULT_REGION -p awscli2 --run "aws s3 cp _build/metrics/metrics-test.svg s3://${R2_BUCKET}/${TEST_PATH} --content-type 'image/svg+xml'"; then
+              echo "TEST_PLOT_URL=${R2_PUBLIC_URL}/${TEST_PATH}" >> $GITHUB_ENV
+            else
+              echo "Failed to upload test plot (non-fatal)"
+            fi
+          fi
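+
+          # Values appended to $GITHUB_ENV become environment variables in
+          # subsequent steps; the "Write metrics summary" step below reads
+          # BUILD_PLOT_URL and TEST_PLOT_URL from there.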
+
+ - name: Write metrics summary
+ if: ${{ !cancelled() }}
+ shell: devx {0}
+ run: |
+ echo "## Build Metrics Summary" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+
+ # Embed build plot if available
+ if [[ -n "${BUILD_PLOT_URL:-}" ]]; then
+ echo "### Build Phases (CPU & Memory)" >> $GITHUB_STEP_SUMMARY
+ echo "
" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ fi
+
+ # Embed test plot if available
+ if [[ -n "${TEST_PLOT_URL:-}" ]]; then
+ echo "### Test Phase (CPU & Memory)" >> $GITHUB_STEP_SUMMARY
+ echo "
" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ fi
+
+ echo "### Phase Timings" >> $GITHUB_STEP_SUMMARY
+ echo "| Phase | Duration | Status |" >> $GITHUB_STEP_SUMMARY
+ echo "|-------|----------|--------|" >> $GITHUB_STEP_SUMMARY
+ for phase in cabal stage1 stage2 stage2-utils bindist test; do
+ if [[ -f "_build/timing/$phase.start" && -f "_build/timing/$phase.end" ]]; then
+ start=$(cat "_build/timing/$phase.start")
+ end=$(cat "_build/timing/$phase.end")
+ duration=$((end - start))
+ mins=$((duration / 60))
+ secs=$((duration % 60))
+ status="OK"
+ if [[ -f "_build/timing/$phase.status" ]]; then
+ [[ $(cat "_build/timing/$phase.status") == "1" ]] && status="FAIL"
+ fi
+ echo "| $phase | ${mins}m ${secs}s | $status |" >> $GITHUB_STEP_SUMMARY
+ fi
+ done
+ echo "" >> $GITHUB_STEP_SUMMARY
+ # Add memory stats from metrics
+ if [[ -f "_build/metrics/metrics.csv" ]]; then
+ max_mem=$(awk -F',' 'NR>1 {if($3>max) max=$3} END {printf "%.1f", max/1024}' _build/metrics/metrics.csv)
+ echo "**Peak Memory:** ${max_mem} GB" >> $GITHUB_STEP_SUMMARY
+ fi
- name: Upload test results
uses: actions/upload-artifact@v4
@@ -73,3 +223,13 @@ jobs:
_build/test-perf.csv
_build/test-summary.txt
_build/test-junit.xml
+ _build/logs/test.log
+ _build/timing/
+
+ - name: Upload metrics
+ uses: actions/upload-artifact@v4
+ if: ${{ !cancelled() }}
+ with:
+ name: ${{ matrix.plat }}-metrics
+ path: |
+ _build/metrics/
diff --git a/Makefile b/Makefile
index a73513279fa9..ced76d77a1fa 100644
--- a/Makefile
+++ b/Makefile
@@ -89,6 +89,24 @@ VERBOSE ?= 0
# to run. The default remains static to keep rebuild cost low.
DYNAMIC ?= 0
+# Quiet mode: suppress output unless error (QUIET=1)
+QUIET ?= 0
+
+# Timing, logging, and metrics directories
+TIMING_DIR := _build/timing
+LOGS_DIR := _build/logs
+METRICS_DIR := _build/metrics
+
+# Metrics collection interval (seconds, supports decimals like 0.5)
+METRICS_INTERVAL ?= 0.5
+
+# Enable metrics collection (METRICS=1)
+METRICS ?= 0
+
+# Set default goal explicitly (timing-summary is defined before 'all' in the file,
+# but 'all' should be the default target when running 'make' without arguments)
+.DEFAULT_GOAL := all
+
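+# Illustrative combinations of the knobs above:
+#   make QUIET=1 all                        # per-phase logs land in _build/logs/
+#   make METRICS=1 all                      # sample CPU/memory into _build/metrics
+#   make METRICS=1 METRICS_INTERVAL=1 all   # sample once per second
+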
# If using autoconf feature toggles you can instead run:
# ./configure --enable-dynamic --enable-profiling --enable-debug
# which generates cabal.project.stage2.settings (imported by cabal.project.stage2).
@@ -186,6 +204,33 @@ CPUS_DETECT_SCRIPT := ./mk/detect-cpu-count.sh
CPUS := $(shell if [ -x $(CPUS_DETECT_SCRIPT) ]; then $(CPUS_DETECT_SCRIPT); else echo 2; fi)
THREADS ?= $(shell echo $$(( $(CPUS) + 1 )))
+# --- Timing Summary ---
+# Displays and saves timing information for all build phases
+.PHONY: timing-summary
+timing-summary:
+ @./mk/timing-summary.sh $(TIMING_DIR)
+
+# --- Metrics Collection ---
+# Start/stop CPU and memory metrics collection during build
+# Usage: make METRICS=1 all (to collect metrics during build)
+# make metrics-plot (to generate plot after build)
+
+.PHONY: metrics-start metrics-stop metrics-plot
+
+metrics-start:
+ @./mk/collect-metrics.sh start $(METRICS_DIR) $(METRICS_INTERVAL)
+
+metrics-stop:
+ @./mk/collect-metrics.sh stop $(METRICS_DIR)
+
+# Generate metrics plot with phase markers
+metrics-plot:
+ @if [ -f "$(METRICS_DIR)/metrics.csv" ]; then \
+ $(PYTHON) ./mk/plot-metrics.py $(METRICS_DIR) $(TIMING_DIR) $(METRICS_DIR)/build-metrics.png; \
+ else \
+ echo "No metrics data found. Run 'make METRICS=1 all' first."; \
+ fi
+
CONFIGURE_SCRIPTS = \
configure \
rts/configure \
@@ -219,7 +264,14 @@ CONFIGURED_FILES := \
rts/include/ghcversion.h
# --- Main Targets ---
+# When METRICS=1, wrap the build with metrics collection
+ifeq ($(METRICS),1)
+all: metrics-start _build/bindist
+ @./mk/collect-metrics.sh stop $(METRICS_DIR)
+ @echo "Metrics collected. Run 'make metrics-plot' to generate visualization."
+else
all: _build/bindist
+endif
STAGE_UTIL_TARGETS := \
deriveConstants:deriveConstants \
@@ -570,11 +622,12 @@ $(abspath _build/stage0/bin/cabal): _build/stage0/bin/cabal
# We need an absolute path here otherwise cabal will consider the path relative to the project directory
_build/stage0/bin/cabal: BUILD_ARGS=-j -w $(GHC0) --disable-tests --project-dir libraries/Cabal --builddir=$(abspath _build/stage0) --ghc-options="-fhide-source-paths"
_build/stage0/bin/cabal:
- @echo "::group::Building Cabal..."
- @mkdir -p _build/stage0/bin _build/logs
- cabal build $(BUILD_ARGS) cabal-install:exe:cabal
- cp -rfp $(shell cabal list-bin -v0 $(BUILD_ARGS) cabal-install:exe:cabal) _build/stage0/bin/cabal
- @echo "::endgroup::"
+ @mkdir -p _build/stage0/bin
+ @./mk/run-phase.sh cabal $(QUIET) $(TIMING_DIR) $(LOGS_DIR) -- \
+ bash -c 'echo "::group::Building Cabal..."; \
+ cabal build $(BUILD_ARGS) cabal-install:exe:cabal && \
+ cp -rfp $$(cabal list-bin -v0 $(BUILD_ARGS) cabal-install:exe:cabal) _build/stage0/bin/cabal && \
+ echo "::endgroup::"'
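+
+# Quoting note for the run-phase.sh recipes below: each '"'"' sequence closes
+# the outer single-quoted bash -c script, emits a literal single quote, and
+# reopens it, so expanded Make variables (e.g. $(HADRIAN_SETTINGS)) end up
+# single-quoted inside the inner script.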
# --- Stage 1 build ---
@@ -584,11 +637,11 @@ _build/stage1/%: private GHC=$(GHC0)
.PHONY: $(addprefix _build/stage1/bin/,$(STAGE1_EXECUTABLES))
$(addprefix _build/stage1/bin/,$(STAGE1_EXECUTABLES)) &: private TARGET_PLATFORM=
$(addprefix _build/stage1/bin/,$(STAGE1_EXECUTABLES)) &: $(CABAL) $(CONFIGURE_SCRIPTS) $(CONFIGURED_FILES) libraries/ghc-boot-th-next/ghc-boot-th-next.cabal
- @echo "::group::Building stage1 executables ($(STAGE1_EXECUTABLES))..."
- # Force cabal to replan
- rm -rf _build/stage1/cache
- HADRIAN_SETTINGS='$(HADRIAN_SETTINGS)' $(CABAL_BUILD) $(STAGE1_TARGETS)
- @echo "::endgroup::"
+ @./mk/run-phase.sh stage1 $(QUIET) $(TIMING_DIR) $(LOGS_DIR) -- \
+ bash -c 'echo "::group::Building stage1 executables ($(STAGE1_EXECUTABLES))..."; \
+ rm -rf _build/stage1/cache && \
+ HADRIAN_SETTINGS='"'"'$(HADRIAN_SETTINGS)'"'"' $(CABAL_BUILD) $(STAGE1_TARGETS) && \
+ echo "::endgroup::"'
_build/stage1/lib/settings: _build/stage1/bin/ghc-toolchain-bin
@echo "::group::Creating settings for $(TARGET_TRIPLE)..."
@@ -628,13 +681,13 @@ _build/stage2/%: private GHC=$(realpath _build/stage1/bin/ghc)
.PHONY: $(addprefix _build/stage2/bin/,$(STAGE2_EXECUTABLES))
$(addprefix _build/stage2/bin/,$(STAGE2_EXECUTABLES)) &: private TARGET_PLATFORM=
$(addprefix _build/stage2/bin/,$(STAGE2_EXECUTABLES)) &: $(CABAL) stage1
- @echo "::group::Building stage2 executables ($(STAGE2_EXECUTABLES))..."
- # Force cabal to replan
- rm -rf _build/stage2/cache
- HADRIAN_SETTINGS='$(HADRIAN_SETTINGS)' \
+ @./mk/run-phase.sh stage2 $(QUIET) $(TIMING_DIR) $(LOGS_DIR) -- \
+ bash -c 'echo "::group::Building stage2 executables ($(STAGE2_EXECUTABLES))..."; \
+ rm -rf _build/stage2/cache && \
+ HADRIAN_SETTINGS='"'"'$(HADRIAN_SETTINGS)'"'"' \
PATH=$(PWD)/_build/stage1/bin:$(PATH) \
- $(CABAL_BUILD) --ghc-options="-ghcversion-file=$(abspath ./rts/include/ghcversion.h)" -W $(GHC0) $(STAGE2_TARGETS)
- @echo "::endgroup::"
+ $(CABAL_BUILD) --ghc-options="-ghcversion-file=$(abspath ./rts/include/ghcversion.h)" -W $(GHC0) $(STAGE2_TARGETS) && \
+ echo "::endgroup::"'
# Do we want to build these with the stage2 GHC or the stage1 GHC?
# Traditionally we build them with the stage1 ghc, but we could just as well
@@ -642,13 +695,13 @@ $(addprefix _build/stage2/bin/,$(STAGE2_EXECUTABLES)) &: $(CABAL) stage1
.PHONY: $(addprefix _build/stage2/bin/,$(STAGE2_UTIL_EXECUTABLES))
$(addprefix _build/stage2/bin/,$(STAGE2_UTIL_EXECUTABLES)) &: private TARGET_PLATFORM=
$(addprefix _build/stage2/bin/,$(STAGE2_UTIL_EXECUTABLES)) &: $(CABAL) stage1 cabal.project.stage2.settings
- @echo "::group::Building stage2 utilities ($(STAGE2_UTIL_EXECUTABLES))..."
- # Force cabal to replan
- rm -rf _build/stage2/cache
- HADRIAN_SETTINGS='$(HADRIAN_SETTINGS)' \
+ @./mk/run-phase.sh stage2-utils $(QUIET) $(TIMING_DIR) $(LOGS_DIR) -- \
+ bash -c 'echo "::group::Building stage2 utilities ($(STAGE2_UTIL_EXECUTABLES))..."; \
+ rm -rf _build/stage2/cache && \
+ HADRIAN_SETTINGS='"'"'$(HADRIAN_SETTINGS)'"'"' \
PATH=$(PWD)/_build/stage1/bin:$(PATH) \
- $(CABAL_BUILD) --ghc-options="-ghcversion-file=$(abspath ./rts/include/ghcversion.h)" -W $(GHC0) $(STAGE2_UTIL_TARGETS)
- @echo "::endgroup::"
+ $(CABAL_BUILD) --ghc-options="-ghcversion-file=$(abspath ./rts/include/ghcversion.h)" -W $(GHC0) $(STAGE2_UTIL_TARGETS) && \
+ echo "::endgroup::"'
_build/stage2/lib/settings: _build/stage1/lib/settings
@mkdir -p $(@D)
@@ -872,40 +925,45 @@ define copycrosslib
done
endef
-# Target for creating the final binary distribution directory
-#_build/bindist: stage2 driver/ghc-usage.txt driver/ghci-usage.txt
+# Target for creating the final binary distribution directory - wrapped with timing and quiet mode
_build/bindist: stage2 driver/ghc-usage.txt driver/ghci-usage.txt
- @echo "::group::Creating binary distribution in $@"
- @mkdir -p $@/bin
- @mkdir -p $@/lib
+ @./mk/run-phase.sh bindist $(QUIET) $(TIMING_DIR) $(LOGS_DIR) -- \
+ $(MAKE) --no-print-directory _build_bindist_impl
+
+# Internal target for bindist (called by _build/bindist with timing wrapper)
+.PHONY: _build_bindist_impl
+_build_bindist_impl:
+ @echo "::group::Creating binary distribution in _build/bindist"
+ @mkdir -p _build/bindist/bin
+ @mkdir -p _build/bindist/lib
# Copy executables from stage2 bin
- @cp -rfp _build/stage2/bin/* $@/bin/
+ @cp -rfp _build/stage2/bin/* _build/bindist/bin/
# Copy libraries and settings from stage2 lib
- @cp -rfp _build/stage2/lib/{package.conf.d,settings,template-hsc.h} $@/lib/
- @mkdir -p $@/lib/$(HOST_PLATFORM)
- @cd $@/lib/package.conf.d ; \
+ @cp -rfp _build/stage2/lib/{package.conf.d,settings,template-hsc.h} _build/bindist/lib/
+ @mkdir -p _build/bindist/lib/$(HOST_PLATFORM)
+ @cd _build/bindist/lib/package.conf.d ; \
for pkg in *.conf ; do \
pkgname=`echo $${pkg} | $(SED) 's/-[0-9.]*\(-[0-9a-zA-Z]*\)\?\.conf//'` ; \
pkgnamever=`echo $${pkg} | $(SED) 's/\.conf//'` ; \
- mkdir -p $(CURDIR)/$@/lib/$(HOST_PLATFORM)/$${pkg%.conf} ; \
- cp -rfp $(CURDIR)/_build/stage2/build/host/*/ghc-*/$${pkg%.conf}/build/* $(CURDIR)/$@/lib/$(HOST_PLATFORM)/$${pkg%.conf} ; \
+ mkdir -p $(CURDIR)/_build/bindist/lib/$(HOST_PLATFORM)/$${pkg%.conf} ; \
+ cp -rfp $(CURDIR)/_build/stage2/build/host/*/ghc-*/$${pkg%.conf}/build/* $(CURDIR)/_build/bindist/lib/$(HOST_PLATFORM)/$${pkg%.conf} ; \
if [ $${pkgname} = "libffi-clib" ] ; then \
- ffi_incdir=`$(CURDIR)/$@/bin/ghc-pkg field libffi-clib include-dirs | grep '/libffi-clib/src/' | sed 's|.*$(CURDIR)/||'` ; \
+ ffi_incdir=`$(CURDIR)/_build/bindist/bin/ghc-pkg field libffi-clib include-dirs | grep '/libffi-clib/src/' | sed 's|.*$(CURDIR)/||'` ; \
$(call patchpackageconf,$${pkgname},$${pkg},../../..,$(HOST_PLATFORM),$${pkgnamever}) ; \
- $(call copy_headers,ffitarget.h,$(CURDIR)/$${ffi_incdir},libffi-clib,$(CURDIR)/$@/bin/ghc-pkg) ; \
+ $(call copy_headers,ffitarget.h,$(CURDIR)/$${ffi_incdir},libffi-clib,$(CURDIR)/_build/bindist/bin/ghc-pkg) ; \
else \
$(call patchpackageconf,$${pkgname},$${pkg},../../..,$(HOST_PLATFORM),$${pkgnamever}) ; \
fi ; \
done
# Copy driver usage files
- @cp -rfp driver/ghc-usage.txt $@/lib/
- @cp -rfp driver/ghci-usage.txt $@/lib/
+ @cp -rfp driver/ghc-usage.txt _build/bindist/lib/
+ @cp -rfp driver/ghci-usage.txt _build/bindist/lib/
@echo "FIXME: Changing 'Support SMP' from YES to NO in settings file"
- @$(SED) 's/("Support SMP","YES")/("Support SMP","NO")/' -i.bck $@/lib/settings
+ @$(SED) 's/("Support SMP","YES")/("Support SMP","NO")/' -i.bck _build/bindist/lib/settings
# Recache
- $@/bin/ghc-pkg recache
+ _build/bindist/bin/ghc-pkg recache
# Copy headers
- @$(call copy_all_stage2_h,$@/bin/ghc-pkg)
+ @$(call copy_all_stage2_h,_build/bindist/bin/ghc-pkg)
@echo "::endgroup::"
_build/bindist/ghc.tar.gz: _build/bindist
@@ -1078,26 +1136,28 @@ testsuite-timeout:
$(MAKE) -C testsuite/timeout
# --- Test Target ---
-
-test: _build/bindist testsuite-timeout
- @echo "::group::Running tests with THREADS=$(THREADS)" >&2
- # If any required tool is missing, testsuite logic will skip related tests.
- TEST_HC='$(TEST_GHC)' \
- GHC_PKG='$(TEST_GHC_PKG)' \
- HP2PS_ABS='$(TEST_HP2PS)' \
- HPC='$(TEST_HPC)' \
- RUNGHC='$(TEST_RUN_GHC)' \
- TEST_CC='$(CC)' \
- TEST_CXX='$(CXX)' \
- TEST_HC_OPTS='$(CANONICAL_TEST_HC_OPTS)' \
- METRICS_FILE='$(CURDIR)/_build/test-perf.csv' \
- SUMMARY_FILE='$(CURDIR)/_build/test-summary.txt' \
- JUNIT_FILE='$(CURDIR)/_build/test-junit.xml' \
- SKIP_PERF_TESTS='$(SKIP_PERF_TESTS)' \
- THREADS='$(THREADS)' \
- $(MAKE) -C testsuite/tests test
- @echo "::endgroup::"
+# Uses order-only prerequisites (|) so that running 'make test' after 'make all'
+# doesn't re-run the entire build chain (which would overwrite timing files)
+
+test: | _build/bindist testsuite-timeout
+ @./mk/run-phase.sh test $(QUIET) $(TIMING_DIR) $(LOGS_DIR) -- \
+ bash -c 'echo "::group::Running tests with THREADS=$(THREADS)" >&2; \
+ TEST_HC='"'"'$(TEST_GHC)'"'"' \
+ GHC_PKG='"'"'$(TEST_GHC_PKG)'"'"' \
+ HP2PS_ABS='"'"'$(TEST_HP2PS)'"'"' \
+ HPC='"'"'$(TEST_HPC)'"'"' \
+ RUNGHC='"'"'$(TEST_RUN_GHC)'"'"' \
+ TEST_CC='"'"'$(CC)'"'"' \
+ TEST_CXX='"'"'$(CXX)'"'"' \
+ TEST_HC_OPTS='"'"'$(CANONICAL_TEST_HC_OPTS)'"'"' \
+ METRICS_FILE='"'"'$(CURDIR)/_build/test-perf.csv'"'"' \
+ SUMMARY_FILE='"'"'$(CURDIR)/_build/test-summary.txt'"'"' \
+ JUNIT_FILE='"'"'$(CURDIR)/_build/test-junit.xml'"'"' \
+ SKIP_PERF_TESTS='"'"'$(SKIP_PERF_TESTS)'"'"' \
+ THREADS='"'"'$(THREADS)'"'"' \
+ $(MAKE) -C testsuite/tests test && \
+ echo "::endgroup::"'
# Inform Make that these are not actual files if they get deleted by other means
-.PHONY: clean clean-stage1 clean-stage2 clean-stage3 distclean test all
+.PHONY: clean clean-stage1 clean-stage2 clean-stage3 distclean test all timing-summary
diff --git a/mk/collect-metrics.sh b/mk/collect-metrics.sh
new file mode 100755
index 000000000000..9750685ab3a6
--- /dev/null
+++ b/mk/collect-metrics.sh
@@ -0,0 +1,221 @@
+#!/usr/bin/env bash
+# collect-metrics.sh - Collect CPU and memory metrics during build
+#
+# Usage: collect-metrics.sh start METRICS_DIR [INTERVAL]
+#        collect-metrics.sh stop [METRICS_DIR]
+#
+# Commands:
+# start - Start collecting metrics (runs in background)
+# stop - Stop metrics collection
+#
+# Arguments:
+# METRICS_DIR - Directory for metrics output
+# INTERVAL - Sample interval in seconds (default: 0.5)
+#
+# Output files:
+# $METRICS_DIR/metrics.csv - CSV with timestamp, cpu%, mem_used_mb, mem_total_mb
+# $METRICS_DIR/collector.pid - PID file for the collector process
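+#
+# Example (illustrative):
+#   ./mk/collect-metrics.sh start _build/metrics 0.5
+#   make all
+#   ./mk/collect-metrics.sh stop _build/metrics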
+
+set -uo pipefail
+
+CMD="${1:-}"
+METRICS_DIR="${2:-_build/metrics}"
+INTERVAL="${3:-0.5}"
+
+PID_FILE="$METRICS_DIR/collector.pid"
+METRICS_FILE="$METRICS_DIR/metrics.csv"
+
+# Detect OS for platform-specific commands
+OS="$(uname -s)"
+
+# State file for CPU delta calculation
+CPU_STATE_FILE=""
+
+# Get CPU usage percentage (cross-platform)
+# Calculates delta between samples for accurate instantaneous usage
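+#
+# Example: if delta_total = 400 ticks and delta_idle = 100 ticks between two
+# samples, the reported usage is 100 * (1 - 100/400) = 75.0%.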
+get_cpu_usage() {
+ case "$OS" in
+ Darwin)
+            # macOS: try sysctl CPU ticks and compute a delta. Note that
+            # kern.cp_time is a BSD sysctl which stock macOS does not expose,
+            # so in practice this usually takes the ps fallback below.
+ local ticks
+ ticks=$(sysctl -n kern.cp_time 2>/dev/null)
+ if [[ -z "$ticks" ]]; then
+ # Fallback: use ps to get total CPU (less accurate but fast)
+ ps -A -o %cpu | awk '{sum += $1} END {printf "%.1f", sum}'
+ return
+ fi
+
+ # Parse: user nice sys idle
+ local user nice sys idle total
+ read user nice sys idle <<< "$ticks"
+ total=$((user + nice + sys + idle))
+
+ # Calculate delta from previous sample
+ if [[ -f "$CPU_STATE_FILE" ]]; then
+ local prev_total prev_idle
+ read prev_total prev_idle < "$CPU_STATE_FILE"
+ local delta_total=$((total - prev_total))
+ local delta_idle=$((idle - prev_idle))
+ if [[ $delta_total -gt 0 ]]; then
+ echo "$total $idle" > "$CPU_STATE_FILE"
+ awk "BEGIN {printf \"%.1f\", 100 * (1 - $delta_idle / $delta_total)}"
+ return
+ fi
+ fi
+
+ # First sample or invalid delta: store state, return cumulative
+ echo "$total $idle" > "$CPU_STATE_FILE"
+ if [[ $total -gt 0 ]]; then
+ awk "BEGIN {printf \"%.1f\", 100 * (1 - $idle / $total)}"
+ else
+ echo "0"
+ fi
+ ;;
+ Linux)
+ # Linux: calculate from /proc/stat with delta
+ # /proc/stat format: cpu [steal] [guest] [guest_nice]
+ if [[ ! -f /proc/stat ]]; then
+ echo "0"
+ return
+ fi
+
+ local line
+ read -r line < /proc/stat
+
+ # Parse fields - use named variable for label instead of _ (special bash variable)
+ # Note: /proc/stat has 10 numeric fields; we only need the first 7 for CPU calculation
+ # The 'rest' variable captures any additional fields (steal, guest, guest_nice)
+ local label user nice sys idle iowait irq softirq rest
+ read label user nice sys idle iowait irq softirq rest <<< "$line"
+
+ # Validate we got numeric values (guards against parse failures)
+ if [[ -z "$user" || -z "$idle" ]]; then
+ echo "0"
+ return
+ fi
+
+ local total=$((user + nice + sys + idle + iowait + irq + softirq))
+
+ if [[ -f "$CPU_STATE_FILE" ]]; then
+ local prev_total prev_idle
+ read prev_total prev_idle < "$CPU_STATE_FILE"
+ local delta_total=$((total - prev_total))
+ local delta_idle=$((idle - prev_idle))
+ if [[ $delta_total -gt 0 ]]; then
+ echo "$total $idle" > "$CPU_STATE_FILE"
+ awk "BEGIN {printf \"%.1f\", 100 * (1 - $delta_idle / $delta_total)}"
+ return
+ fi
+ fi
+
+ echo "$total $idle" > "$CPU_STATE_FILE"
+ if [[ $total -gt 0 ]]; then
+ awk "BEGIN {printf \"%.1f\", 100 * (1 - $idle / $total)}"
+ else
+ echo "0"
+ fi
+ ;;
+ *)
+ echo "0"
+ ;;
+ esac
+}
+
+# Get memory usage in MB (cross-platform)
+get_memory_usage() {
+ case "$OS" in
+ Darwin)
+ # macOS: use vm_stat and sysctl
+ page_size=$(sysctl -n hw.pagesize 2>/dev/null || echo 4096)
+ total_mb=$(( $(sysctl -n hw.memsize 2>/dev/null || echo 0) / 1024 / 1024 ))
+
+ # Parse vm_stat output
+ vm_stat 2>/dev/null | awk -v ps="$page_size" -v total="$total_mb" '
+ /Pages active/ { active = $3 + 0 }
+ /Pages wired/ { wired = $4 + 0 }
+ /Pages compressed/ { compressed = $5 + 0 }
+ END {
+ used_mb = int((active + wired + compressed) * ps / 1024 / 1024)
+ printf "%d,%d", used_mb, total
+ }
+ '
+ ;;
+ Linux)
+ # Linux: parse /proc/meminfo
+ awk '
+ /^MemTotal:/ { total = $2 }
+ /^MemAvailable:/ { available = $2 }
+ END {
+ total_mb = int(total / 1024)
+ used_mb = int((total - available) / 1024)
+ printf "%d,%d", used_mb, total_mb
+ }
+ ' /proc/meminfo
+ ;;
+ *)
+ echo "0,0"
+ ;;
+ esac
+}
+
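+# Sample metrics.csv contents produced below (illustrative values):
+#   timestamp,cpu_percent,mem_used_mb,mem_total_mb
+#   1700000000,75.0,12288,65536
+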
+# Collector loop
+run_collector() {
+ mkdir -p "$METRICS_DIR"
+
+ # Initialize CPU state file for delta calculations
+ CPU_STATE_FILE="$METRICS_DIR/.cpu_state"
+ rm -f "$CPU_STATE_FILE"
+
+ # Write CSV header
+ echo "timestamp,cpu_percent,mem_used_mb,mem_total_mb" > "$METRICS_FILE"
+
+ while true; do
+ timestamp=$(date +%s)
+ cpu=$(get_cpu_usage)
+ mem=$(get_memory_usage)
+
+ echo "$timestamp,$cpu,$mem" >> "$METRICS_FILE"
+ sleep "$INTERVAL"
+ done
+}
+
+case "$CMD" in
+ start)
+ mkdir -p "$METRICS_DIR"
+
+ # Stop any existing collector
+ if [[ -f "$PID_FILE" ]]; then
+ old_pid=$(cat "$PID_FILE")
+ kill "$old_pid" 2>/dev/null || true
+ rm -f "$PID_FILE"
+ fi
+
+ # Start collector in background
+ run_collector &
+ collector_pid=$!
+ echo "$collector_pid" > "$PID_FILE"
+ echo "Started metrics collector (PID: $collector_pid, interval: ${INTERVAL}s)"
+ echo "Output: $METRICS_FILE"
+ ;;
+
+ stop)
+ if [[ -f "$PID_FILE" ]]; then
+ pid=$(cat "$PID_FILE")
+ if kill "$pid" 2>/dev/null; then
+ echo "Stopped metrics collector (PID: $pid)"
+ else
+ echo "Collector process $pid not running"
+ fi
+ rm -f "$PID_FILE"
+ else
+ echo "No collector PID file found"
+ fi
+ ;;
+
+ *)
+        echo "Usage: $0 start METRICS_DIR [INTERVAL]"
+        echo "       $0 stop [METRICS_DIR]"
+ exit 1
+ ;;
+esac
diff --git a/mk/plot-metrics.py b/mk/plot-metrics.py
new file mode 100755
index 000000000000..c9d7e719de39
--- /dev/null
+++ b/mk/plot-metrics.py
@@ -0,0 +1,314 @@
+#!/usr/bin/env python3
+"""
+plot-metrics.py - Generate build metrics visualization
+
+Usage: plot-metrics.py METRICS_DIR TIMING_DIR [OUTPUT_PREFIX]
+
+Arguments:
+ METRICS_DIR - Directory containing metrics.csv
+ TIMING_DIR - Directory containing phase timing files (.start, .end)
+ OUTPUT_PREFIX - Output file prefix (default: METRICS_DIR/build-metrics)
+ Generates: PREFIX-build.svg and PREFIX-test.svg
+
+Creates dual-axis plots showing:
+ - CPU usage over time (left axis, blue)
+ - Memory usage over time (right axis, green)
+ - Phase markers with shaded regions and labels
+
+Two separate plots are generated:
+ - Build plot: cabal, stage1, stage2, stage2-utils, bindist phases
+ - Test plot: test phase only
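+
+Example (illustrative):
+    python3 mk/plot-metrics.py _build/metrics _build/timing _build/metrics/metrics
+    # -> writes _build/metrics/metrics-build.svg and _build/metrics/metrics-test.svg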
+"""
+
+import sys
+import os
+import csv
+import math
+from datetime import datetime, timedelta
+from pathlib import Path
+
+# Try to import matplotlib, provide helpful error if not available
+try:
+ import matplotlib
+ matplotlib.use('Agg') # Non-interactive backend for headless use
+ import matplotlib.pyplot as plt
+ import matplotlib.dates as mdates
+ from matplotlib.patches import Rectangle
+except ImportError:
+ print("Error: matplotlib is required. Install with:")
+    print("  nix-shell -p 'python3.withPackages (ps: [ps.matplotlib])' --run 'python3 mk/plot-metrics.py ...'")
+ print(" # or: pip install matplotlib")
+ sys.exit(1)
+
+
+def read_metrics(metrics_file):
+ """Read metrics CSV file and return timestamps, cpu, and memory data."""
+ timestamps = []
+ cpu_percent = []
+ mem_used_mb = []
+ mem_total_mb = []
+
+ with open(metrics_file, 'r') as f:
+ reader = csv.DictReader(f)
+ for row in reader:
+ try:
+ ts = int(row['timestamp'])
+ timestamps.append(datetime.fromtimestamp(ts))
+ cpu_percent.append(float(row['cpu_percent']))
+ mem_used_mb.append(float(row['mem_used_mb']))
+ mem_total_mb.append(float(row['mem_total_mb']))
+            except (ValueError, KeyError):
+                continue  # Skip malformed rows
+
+ return timestamps, cpu_percent, mem_used_mb, mem_total_mb
+
+
+def read_phases(timing_dir):
+ """Read phase timing files and return list of (name, start_time, end_time, status)."""
+ phases = []
+ timing_path = Path(timing_dir)
+
+ # Find all .start files
+ for start_file in timing_path.glob('*.start'):
+ phase_name = start_file.stem
+ end_file = timing_path / f"{phase_name}.end"
+ status_file = timing_path / f"{phase_name}.status"
+
+ if not end_file.exists():
+ continue
+
+ try:
+ with open(start_file) as f:
+ start_ts = int(f.read().strip())
+ with open(end_file) as f:
+ end_ts = int(f.read().strip())
+
+ status = "OK"
+ if status_file.exists():
+ with open(status_file) as f:
+ status = "FAIL" if f.read().strip() == "1" else "OK"
+
+ phases.append((
+ phase_name,
+ datetime.fromtimestamp(start_ts),
+ datetime.fromtimestamp(end_ts),
+ status
+ ))
+ except (ValueError, IOError):
+ continue
+
+ # Sort by start time
+ phases.sort(key=lambda x: x[1])
+ return phases
+
+
+def format_duration(seconds):
+ """Format duration in human-readable form."""
+ if seconds < 60:
+ return f"{seconds}s"
+ elif seconds < 3600:
+ mins = seconds // 60
+ secs = seconds % 60
+ return f"{mins}m {secs}s"
+ else:
+ hours = seconds // 3600
+ mins = (seconds % 3600) // 60
+ return f"{hours}h {mins}m"
+
+
+def create_plot(timestamps, cpu, mem_used, mem_total, phases, title, output_file):
+ """Create a single metrics plot for the given data and phases."""
+ # Define colors
+ cpu_color = '#2E86AB' # Blue
+ mem_color = '#28A745' # Green
+ phase_colors = {
+ 'cabal': '#FFD700', # Gold
+ 'stage1': '#FF6B6B', # Red
+ 'stage2': '#4ECDC4', # Teal
+ 'stage2-utils': '#95E1D3', # Light teal
+ 'bindist': '#A8E6CF', # Mint
+ 'test': '#DDA0DD', # Plum
+ }
+
+ # Create figure with dual y-axes - wider aspect ratio (20:6)
+ fig, ax1 = plt.subplots(figsize=(20, 6))
+
+ # Calculate effective concurrency from max CPU usage
+ max_cpu = max(cpu) if cpu else 100
+    effective_cores = max(1, math.ceil(max_cpu / 100))  # ceil; the (x+99)//100 trick misrounds floats
+ cpu_limit = effective_cores * 100
+
+ # Plot CPU usage
+ ax1.set_xlabel('Time', fontsize=11)
+ ax1.set_ylabel(f'CPU Usage (%, {effective_cores} cores)', color=cpu_color, fontsize=11)
+ line1, = ax1.plot(timestamps, cpu, color=cpu_color, linewidth=1.2, alpha=0.8, label='CPU %')
+ ax1.tick_params(axis='y', labelcolor=cpu_color)
+ ax1.set_ylim(0, cpu_limit * 1.05)
+ ax1.grid(True, alpha=0.3)
+
+ # Create second y-axis for memory
+ ax2 = ax1.twinx()
+ ax2.set_ylabel('Memory Used (GB)', color=mem_color, fontsize=11)
+ mem_gb = [m / 1024 for m in mem_used]
+ line2, = ax2.plot(timestamps, mem_gb, color=mem_color, linewidth=1.2, alpha=0.8, label='Memory (GB)')
+ ax2.tick_params(axis='y', labelcolor=mem_color)
+
+ # Set memory y-axis limit based on total memory
+ if mem_total:
+ max_mem_gb = max(mem_total) / 1024
+ ax2.set_ylim(0, max_mem_gb * 1.1)
+
+ # Add phase markers as shaded regions
+ if phases and timestamps:
+ plot_start = timestamps[0]
+ plot_end = timestamps[-1]
+
+ for phase_name, start, end, status in phases:
+ # Clamp to plot range
+ if end < plot_start or start > plot_end:
+ continue
+
+ start = max(start, plot_start)
+ end = min(end, plot_end)
+
+ # Get color for phase
+ color = phase_colors.get(phase_name, '#CCCCCC')
+
+ # Add shaded region
+ ax1.axvspan(start, end, alpha=0.2, color=color)
+
+ # Add vertical line at phase start
+ ax1.axvline(x=start, color=color, linestyle='--', linewidth=1, alpha=0.7)
+
+ # Add phase label at top
+ mid_time = start + (end - start) / 2
+ duration = int((end - start).total_seconds())
+ duration_str = format_duration(duration)
+ status_marker = '✓' if status == 'OK' else '✗'
+
+ ax1.annotate(
+ f'{phase_name}\n{duration_str} {status_marker}',
+ xy=(mid_time, cpu_limit),
+ fontsize=10,
+ ha='center',
+ va='top',
+ bbox=dict(boxstyle='round,pad=0.3', facecolor=color, alpha=0.7)
+ )
+
+ # Format x-axis
+ ax1.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
+ ax1.xaxis.set_major_locator(mdates.AutoDateLocator())
+ plt.xticks(rotation=45)
+
+ # Title
+ plt.title(title, fontsize=13, fontweight='bold')
+
+ # Legend
+ lines = [line1, line2]
+ labels = ['CPU Usage (%)', 'Memory Used (GB)']
+ ax1.legend(lines, labels, loc='upper left', framealpha=0.9)
+
+ # Tight layout
+ plt.tight_layout()
+
+ # Save as SVG
+ plt.savefig(output_file, format='svg', bbox_inches='tight')
+ plt.close(fig)
+ print(f"Plot saved to: {output_file}")
+
+
+def filter_metrics_for_phases(timestamps, cpu, mem_used, mem_total, phases):
+ """Filter metrics data to only include time range covered by phases."""
+ if not phases or not timestamps:
+ return timestamps, cpu, mem_used, mem_total
+
+ # Get time range from phases
+ phase_start = min(p[1] for p in phases)
+ phase_end = max(p[2] for p in phases)
+
+ # Add some margin (30 seconds before and after)
+ margin = timedelta(seconds=30)
+ range_start = phase_start - margin
+ range_end = phase_end + margin
+
+ # Filter data
+ filtered = [(t, c, m, mt) for t, c, m, mt in zip(timestamps, cpu, mem_used, mem_total)
+ if range_start <= t <= range_end]
+
+ if not filtered:
+ return timestamps, cpu, mem_used, mem_total
+
+ return zip(*filtered)
+
+
+def plot_metrics(metrics_dir, timing_dir, output_prefix):
+ """Generate the metrics plots (build and test separately)."""
+ metrics_file = Path(metrics_dir) / 'metrics.csv'
+
+ if not metrics_file.exists():
+ print(f"Error: Metrics file not found: {metrics_file}")
+ sys.exit(1)
+
+ # Read data
+ timestamps, cpu, mem_used, mem_total = read_metrics(metrics_file)
+ all_phases = read_phases(timing_dir)
+
+ if not timestamps:
+ print("Error: No metrics data found")
+ sys.exit(1)
+
+ # Separate build phases from test phase
+ build_phase_names = {'cabal', 'stage1', 'stage2', 'stage2-utils', 'bindist'}
+ build_phases = [p for p in all_phases if p[0] in build_phase_names]
+ test_phases = [p for p in all_phases if p[0] == 'test']
+
+ # Generate build plot
+ if build_phases:
+ ts_build, cpu_build, mem_build, mem_total_build = filter_metrics_for_phases(
+ timestamps, cpu, mem_used, mem_total, build_phases)
+ ts_build, cpu_build, mem_build, mem_total_build = list(ts_build), list(cpu_build), list(mem_build), list(mem_total_build)
+
+ build_duration = int((build_phases[-1][2] - build_phases[0][1]).total_seconds())
+ build_title = f'GHC Build Metrics - Total: {format_duration(build_duration)}'
+ build_output = f"{output_prefix}-build.svg"
+ create_plot(ts_build, cpu_build, mem_build, mem_total_build,
+ build_phases, build_title, build_output)
+
+ # Generate test plot
+ if test_phases:
+ ts_test, cpu_test, mem_test, mem_total_test = filter_metrics_for_phases(
+ timestamps, cpu, mem_used, mem_total, test_phases)
+ ts_test, cpu_test, mem_test, mem_total_test = list(ts_test), list(cpu_test), list(mem_test), list(mem_total_test)
+
+ test_duration = int((test_phases[-1][2] - test_phases[0][1]).total_seconds())
+ test_title = f'GHC Test Metrics - Duration: {format_duration(test_duration)}'
+ test_output = f"{output_prefix}-test.svg"
+ create_plot(ts_test, cpu_test, mem_test, mem_total_test,
+ test_phases, test_title, test_output)
+
+ # Print phase summary
+ if all_phases:
+ total_duration = int((all_phases[-1][2] - all_phases[0][1]).total_seconds())
+ print("\nPhase Summary:")
+ print("-" * 50)
+ for phase_name, start, end, status in all_phases:
+ duration = int((end - start).total_seconds())
+ print(f" {phase_name:15} {format_duration(duration):>10} [{status}]")
+ print("-" * 50)
+ print(f" {'TOTAL':15} {format_duration(total_duration):>10}")
+
+
+def main():
+ if len(sys.argv) < 3:
+ print(__doc__)
+ sys.exit(1)
+
+ metrics_dir = sys.argv[1]
+ timing_dir = sys.argv[2]
+ output_prefix = sys.argv[3] if len(sys.argv) > 3 else os.path.join(metrics_dir, 'metrics')
+
+ plot_metrics(metrics_dir, timing_dir, output_prefix)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/mk/run-phase.sh b/mk/run-phase.sh
new file mode 100755
index 000000000000..07cae5373834
--- /dev/null
+++ b/mk/run-phase.sh
@@ -0,0 +1,91 @@
+#!/usr/bin/env bash
+# run-phase.sh - Wrapper for timed build phases with quiet mode support
+#
+# Usage: run-phase.sh PHASE_NAME QUIET TIMING_DIR LOGS_DIR -- COMMAND...
+#
+# Arguments:
+# PHASE_NAME - Name of the build phase (cabal, stage1, stage2, bindist, test)
+# QUIET - "1" to suppress output (log to file), "0" for normal output
+# TIMING_DIR - Directory for timing files (.start, .end, .status)
+# LOGS_DIR - Directory for log files
+# COMMAND... - The actual build command to run
+#
+# Creates:
+# $TIMING_DIR/$PHASE.start - Unix timestamp when phase started
+# $TIMING_DIR/$PHASE.end - Unix timestamp when phase ended
+# $TIMING_DIR/$PHASE.status - "0" for success, "1" for failure
+# $LOGS_DIR/$PHASE.log - Build output (only in quiet mode)
+#
+# On failure in quiet mode, prints last 100 lines of log.
+#
+# No-op detection:
+# If a build completes in < 30s AND previous timing files exist with
+# duration > 30s, the previous timing is preserved. This prevents
+# "make test" from overwriting real build times with no-op verification times.
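+#
+# Example (illustrative; mirrors the Makefile's bindist rule):
+#   ./mk/run-phase.sh bindist 1 _build/timing _build/logs -- \
+#     make --no-print-directory _build_bindist_impl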
+
+set -uo pipefail
+
+PHASE="$1"
+QUIET="$2"
+TIMING_DIR="$3"
+LOGS_DIR="$4"
+shift 4
+
+# Consume the -- separator if present
+[[ "${1:-}" == "--" ]] && shift
+
+mkdir -p "$TIMING_DIR" "$LOGS_DIR"
+
+# Save existing timing if present (for no-op detection)
+OLD_START=""
+OLD_END=""
+if [[ -f "$TIMING_DIR/$PHASE.start" && -f "$TIMING_DIR/$PHASE.end" ]]; then
+ OLD_START=$(cat "$TIMING_DIR/$PHASE.start")
+ OLD_END=$(cat "$TIMING_DIR/$PHASE.end")
+fi
+
+# Record start time
+START_TIME=$(date +%s)
+echo "$START_TIME" > "$TIMING_DIR/$PHASE.start"
+echo ">>> Building $PHASE..."
+
+if [[ "$QUIET" == "1" ]]; then
+ # Quiet mode: redirect all output to log file
+ if "$@" > "$LOGS_DIR/$PHASE.log" 2>&1; then
+ echo "0" > "$TIMING_DIR/$PHASE.status"
+ else
+ echo "1" > "$TIMING_DIR/$PHASE.status"
+ date +%s > "$TIMING_DIR/$PHASE.end"
+ echo ""
+ echo "=== ERROR building $PHASE (last 100 lines) ==="
+ tail -100 "$LOGS_DIR/$PHASE.log"
+ exit 1
+ fi
+else
+ # Normal mode: show output directly
+ if "$@"; then
+ echo "0" > "$TIMING_DIR/$PHASE.status"
+ else
+ echo "1" > "$TIMING_DIR/$PHASE.status"
+ date +%s > "$TIMING_DIR/$PHASE.end"
+ exit 1
+ fi
+fi
+
+END_TIME=$(date +%s)
+DURATION=$((END_TIME - START_TIME))
+
+# No-op detection: If build took < 30s AND we had previous timing, restore it
+# This preserves real build times when make re-runs stages as no-ops
+NOOP_THRESHOLD=30
+if [[ -n "$OLD_START" && -n "$OLD_END" && "$DURATION" -lt "$NOOP_THRESHOLD" ]]; then
+ OLD_DURATION=$((OLD_END - OLD_START))
+ # Only restore if old duration was significantly longer (real build)
+ if [[ "$OLD_DURATION" -gt "$NOOP_THRESHOLD" ]]; then
+ echo "$OLD_START" > "$TIMING_DIR/$PHASE.start"
+ echo "$OLD_END" > "$TIMING_DIR/$PHASE.end"
+ exit 0
+ fi
+fi
+
+echo "$END_TIME" > "$TIMING_DIR/$PHASE.end"
diff --git a/mk/timing-summary.sh b/mk/timing-summary.sh
new file mode 100755
index 000000000000..be2b13f80a60
--- /dev/null
+++ b/mk/timing-summary.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+# timing-summary.sh - Display and save timing information for build phases
+#
+# Usage: timing-summary.sh TIMING_DIR
+#
+# Reads timing files from TIMING_DIR and displays an ASCII table summary.
+# Also saves summary to TIMING_DIR/summary.txt
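+#
+# Example output (illustrative):
+#   +------------------+---------------+--------+
+#   | stage2           |       42m 10s | OK     |
+#   +------------------+---------------+--------+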
+
+set -uo pipefail
+
+TIMING_DIR="${1:-.}"
+
+echo ""
+echo "+------------------+---------------+--------+"
+echo "| Phase | Duration | Status |"
+echo "+------------------+---------------+--------+"
+
+total=0
+for phase in cabal stage1 stage2 stage2-utils bindist test; do
+ if [[ -f "$TIMING_DIR/$phase.start" ]] && [[ -f "$TIMING_DIR/$phase.end" ]]; then
+ start=$(cat "$TIMING_DIR/$phase.start")
+ end=$(cat "$TIMING_DIR/$phase.end")
+ dur=$((end - start))
+ total=$((total + dur))
+
+ hrs=$((dur / 3600))
+ mins=$(((dur % 3600) / 60))
+ secs=$((dur % 60))
+
+ if [[ $hrs -gt 0 ]]; then
+ dur_str="${hrs}h ${mins}m ${secs}s"
+ elif [[ $mins -gt 0 ]]; then
+ dur_str="${mins}m ${secs}s"
+ else
+ dur_str="${secs}s"
+ fi
+
+ status=$(cat "$TIMING_DIR/$phase.status" 2>/dev/null || echo "?")
+ if [[ "$status" == "0" ]]; then
+ status_str="OK"
+ elif [[ "$status" == "1" ]]; then
+ status_str="FAIL"
+ else
+ status_str="-"
+ fi
+
+ printf "| %-16s | %13s | %-6s |\n" "$phase" "$dur_str" "$status_str"
+ fi
+done
+
+echo "+------------------+---------------+--------+"
+
+hrs=$((total / 3600))
+mins=$(((total % 3600) / 60))
+secs=$((total % 60))
+
+if [[ $hrs -gt 0 ]]; then
+ total_str="${hrs}h ${mins}m ${secs}s"
+elif [[ $mins -gt 0 ]]; then
+ total_str="${mins}m ${secs}s"
+else
+ total_str="${secs}s"
+fi
+
+printf "| %-16s | %13s | |\n" "TOTAL" "$total_str"
+echo "+------------------+---------------+--------+"
+
+# Save summary to file
+mkdir -p "$TIMING_DIR"
+rm -f "$TIMING_DIR/summary.txt"
+for phase in cabal stage1 stage2 stage2-utils bindist test; do
+ if [[ -f "$TIMING_DIR/$phase.start" ]] && [[ -f "$TIMING_DIR/$phase.end" ]]; then
+ start=$(cat "$TIMING_DIR/$phase.start")
+ end=$(cat "$TIMING_DIR/$phase.end")
+ dur=$((end - start))
+ status=$(cat "$TIMING_DIR/$phase.status" 2>/dev/null || echo "?")
+ echo "$phase $dur $status" >> "$TIMING_DIR/summary.txt"
+ fi
+done
diff --git a/testsuite/tests/plugins/all.T b/testsuite/tests/plugins/all.T
index fb671f93b8d6..5b5b1324d819 100644
--- a/testsuite/tests/plugins/all.T
+++ b/testsuite/tests/plugins/all.T
@@ -154,9 +154,15 @@ test('T14335',
['-package-db simple-plugin/pkg.plugins01/local.package.conf -fplugin Simple.Plugin \
-fexternal-interpreter -package simple-plugin ' + config.plugin_way_flags])
+# On AArch64 Darwin with static GHC, plugin tests using the RTS in-memory linker
+# can fail non-deterministically with "Relocation out of range for SUBTRACTOR".
+# This occurs because macOS ASLR can place object file sections >2GB apart,
+# exceeding the 32-bit signed range for ARM64_RELOC_SUBTRACTOR relocations.
+# See: https://gitlab.haskell.org/ghc/ghc/-/issues/21972 (macOS relocation issues)
+# rts/linker/MachO.c (fitsBits check for SUBTRACTOR)
test('plugin-recomp-pure',
[extra_files(['plugin-recomp/', 'plugin-recomp-test.hs']),
-
+ when(opsys('darwin') and arch('aarch64') and not ghc_dynamic(), fragile(21972)),
pre_cmd('$MAKE -s --no-print-directory -C plugin-recomp package.plugins01 TOP={top}')
],
makefile_test, [])
@@ -308,8 +314,10 @@ test('T23832_invalid',
pre_cmd('$MAKE -s --no-print-directory -C defaulting-plugin package.test-defaulting-plugin TOP={top}')],
makefile_test, [])
+# See comment above plugin-recomp-pure for explanation of AArch64 Darwin fragility
test('plugins-order',
[extra_files(['plugin-recomp/', 'plugin-recomp-test.hs']),
+ when(opsys('darwin') and arch('aarch64') and not ghc_dynamic(), fragile(21972)),
pre_cmd('$MAKE -s --no-print-directory -C plugin-recomp package.plugins01 TOP={top}')
],
makefile_test, [])
@@ -359,8 +367,10 @@ test('plugins-external',
when(opsys('mingw32'), expect_broken(20706))],
makefile_test, [])
+# See comment above plugin-recomp-pure for explanation of AArch64 Darwin fragility
test('test-phase-hooks-plugin',
[extra_files(['hooks-plugin/']),
+ when(opsys('darwin') and arch('aarch64') and not ghc_dynamic(), fragile(21972)),
pre_cmd('$MAKE -s --no-print-directory -C hooks-plugin package.test-phase-hooks-plugin TOP={top}')],
compile,
['-package-db hooks-plugin/pkg.test-phase-hooks-plugin/local.package.conf -fplugin Hooks.PhasePlugin -package hooks-plugin ' + config.plugin_way_flags])