diff --git a/.github/scripts/benchmark_formatter.py b/.github/scripts/benchmark_formatter.py
new file mode 100644
index 00000000..11d02034
--- /dev/null
+++ b/.github/scripts/benchmark_formatter.py
@@ -0,0 +1,247 @@
+import pathlib, re, sys
+
+try:
+ p = pathlib.Path("comparison.md")
+ if not p.exists():
+ print("comparison.md not found, skipping post-processing.")
+ sys.exit(0)
+
+ lines = p.read_text(encoding="utf-8").splitlines()
+ processed_lines = []
+ in_code = False
+
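+ # Drop "-<digits>" suffixes (e.g. worker/CPU counts appended to benchmark names) so rows line up across runs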
+ def strip_worker_suffix(text: str) -> str:
+ return re.sub(r"(\S+?)-\d+(\s|$)", r"\1\2", text)
+
+ def get_icon(diff_val: float) -> str:
+ if diff_val > 10:
+ return "š"
+ if diff_val < -10:
+ return "š"
+ return "āœ”ļø"
+
+ def clean_superscripts(text: str) -> str:
+ return re.sub(r"[¹²³ā“āµā¶ā·āøā¹ā°]", "", text)
+
+ def parse_val(token: str):
+ if "%" in token or "=" in token:
+ return None
+ token = clean_superscripts(token)
+ token = token.split("±")[0].strip()
+ token = token.split("(")[0].strip()
+ if not token:
+ return None
+
+ m = re.match(r"^([-+]?\d*\.?\d+)([a-zA-Zµ]+)?$", token)
+ if not m:
+ return None
+ try:
+ val = float(m.group(1))
+ except ValueError:
+ return None
+ suffix = (m.group(2) or "").replace("µ", "u")
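+ # Unit suffixes are normalized to base units (seconds / bytes / plain counts); unknown suffixes fall back to 1.0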
+ multipliers = {
+ "n": 1e-9,
+ "ns": 1e-9,
+ "u": 1e-6,
+ "us": 1e-6,
+ "m": 1e-3,
+ "ms": 1e-3,
+ "s": 1.0,
+ "k": 1e3,
+ "K": 1e3,
+ "M": 1e6,
+ "G": 1e9,
+ "Ki": 1024.0,
+ "Mi": 1024.0**2,
+ "Gi": 1024.0**3,
+ "Ti": 1024.0**4,
+ "B": 1.0,
+ "B/op": 1.0,
+ "C": 1.0, # tolerate degree/unit markers that don't affect ratio
+ }
+ return val * multipliers.get(suffix, 1.0)
+
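+ # Pull the first two numeric values (old and new measurements) from a data row, ignoring the name, separators, and percentages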
+ def extract_two_numbers(tokens):
+ found = []
+ for t in tokens[1:]: # skip name
+ if t in {"±", "│", "~", "ā", "ā"}:
+ continue
+ if "%" in t or "=" in t:
+ continue
+ val = parse_val(t)
+ if val is not None:
+ found.append(val)
+ if len(found) == 2:
+ break
+ return found
+
+ # Pass 0:
+ # scan the data lines (skipping headers, footnotes, and metadata) to find the
+ # maximum content width, so the Diff column can be right-aligned consistently
+ max_content_width = 0
+
+ for line in lines:
+ if line.strip() == "```":
+ in_code = not in_code
+ continue
+ if not in_code:
+ continue
+
+ # Skip footnotes/meta for width calculation
+ if re.match(r"^\s*[¹²³ā“āµā¶ā·āøā¹ā°]", line) or re.search(r"need\s*>?=\s*\d+\s+samples", line):
+ continue
+ if not line.strip() or line.strip().startswith(("goos:", "goarch:", "pkg:", "cpu:")):
+ continue
+ # Header lines are handled separately in Pass 1
+ if "ā" in line and ("vs base" in line or "old" in line or "new" in line):
+ continue
+
+ # It's likely a data line
+ # Check if it has an existing percentage we might move/align
+ curr_line = strip_worker_suffix(line).rstrip()
+ pct_match = re.search(r"([+-]?\d+\.\d+)%", curr_line)
+ if pct_match:
+ # If we are going to realign this, we count width up to the percentage
+ w = len(curr_line[: pct_match.start()].rstrip())
+ else:
+ w = len(curr_line)
+
+ if w > max_content_width:
+ max_content_width = w
+
+ # Calculate global alignment target for Diff column
+ # Ensure target column is beyond the longest line with some padding
+ diff_col_start = max_content_width + 4
+
+ # Calculate right boundary (pipe) position
+ # Diff column width ~12 chars (e.g. "+100.00% š")
+ right_boundary = diff_col_start + 14
+
+ for line in lines:
+
+ if line.strip() == "```":
+ in_code = not in_code
+ processed_lines.append(line)
+ continue
+
+ if not in_code:
+ processed_lines.append(line)
+ continue
+
+ # keep footnotes untouched
+ if re.match(r"^\s*[¹²³ā“āµā¶ā·āøā¹ā°]", line) or re.search(r"need\s*>?=\s*\d+\s+samples", line):
+ processed_lines.append(line)
+ continue
+
+ # header lines: ensure last column labeled Diff and force alignment
+ if "ā" in line and ("vs base" in line or "old" in line or "new" in line):
+ # Strip trailing pipe and whitespace
+ stripped_header = line.rstrip().rstrip("│").rstrip()
+
+ # If "vs base" is present, ensure we don't duplicate "Diff" if it's already there
+ # But we want to enforce OUR alignment, so we might strip existing Diff
+ stripped_header = re.sub(r"\s+Diff\s*$", "", stripped_header, flags=re.IGNORECASE)
+ stripped_header = re.sub(r"\s+Delta\b", "", stripped_header, flags=re.IGNORECASE)
+
+ # Pad the header out to diff_col_start, keeping at least two spaces of separation
+ padding = diff_col_start - len(stripped_header)
+ if padding < 2:
+ padding = 2 # minimum spacing
+
+ new_header = stripped_header + " " * padding
+
+ # Add Diff column header if it's the second header row (vs base)
+ if "vs base" in line or "new pr.json" in line:
+ new_header += "Diff"
+
+ # Add closing pipe at the right boundary
+ current_len = len(new_header)
+ if current_len < right_boundary:
+ new_header += " " * (right_boundary - current_len)
+
+ new_header += "ā"
+ processed_lines.append(new_header)
+ continue
+
+ # non-data meta lines
+ if not line.strip() or line.strip().startswith(("goos:", "goarch:", "pkg:")):
+ processed_lines.append(line)
+ continue
+
+ original_line = line
+ line = strip_worker_suffix(line)
+ tokens = line.split()
+ if not tokens:
+ processed_lines.append(line)
+ continue
+
+ numbers = extract_two_numbers(tokens)
+ pct_match = re.search(r"([+-]?\d+\.\d+)%", line)
+
+ # Helper to align and append
+ def append_aligned(left_part, content):
+ if len(left_part) < diff_col_start:
+ aligned = left_part + " " * (diff_col_start - len(left_part))
+ else:
+ aligned = left_part + " "
+
+ return f"{aligned}{content}"
+
+ # Special handling for geomean when values missing or zero
+ is_geomean = tokens[0] == "geomean"
+ if is_geomean and (len(numbers) < 2 or any(v == 0 for v in numbers)) and not pct_match:
+ leading = re.match(r"^\s*", line).group(0)
+ left = f"{leading}geomean"
+ processed_lines.append(append_aligned(left, "n/a (has zero)"))
+ continue
+
+ # when both values are zero, force diff = 0 and align
+ if len(numbers) == 2 and numbers[0] == 0 and numbers[1] == 0:
+ diff_val = 0.0
+ icon = get_icon(diff_val)
+ left = line.rstrip()
+ processed_lines.append(append_aligned(left, f"{diff_val:+.2f}% {icon}"))
+ continue
+
+ # recompute diff when we have two numeric values
+ if len(numbers) == 2 and numbers[0] != 0:
+ diff_val = (numbers[1] - numbers[0]) / numbers[0] * 100
+ icon = get_icon(diff_val)
+
+ left = line
+ if pct_match:
+ left = line[: pct_match.start()].rstrip()
+ else:
+ left = line.rstrip()
+
+ processed_lines.append(append_aligned(left, f"{diff_val:+.2f}% {icon}"))
+ continue
+
+ # fallback: align existing percentage to Diff column and (re)append icon
+ if pct_match:
+ try:
+ pct_val = float(pct_match.group(1))
+ icon = get_icon(pct_val)
+
+ left = line[: pct_match.start()].rstrip()
+ suffix = line[pct_match.end() :]
+ # Remove any existing icon after the percentage to avoid duplicates
+ suffix = re.sub(r"\s*(š|š|ā”ļø)", "", suffix)
+
+ processed_lines.append(append_aligned(left, f"{pct_val:+.2f}% {icon}{suffix}"))
+ except ValueError:
+ processed_lines.append(line)
+ continue
+
+ # If we cannot parse numbers or percentages, keep the original (only worker suffix stripped)
+ processed_lines.append(line)
+
+ p.write_text("\n".join(processed_lines) + "\n", encoding="utf-8")
+
+except Exception as e:
+ print(f"Error post-processing comparison.md: {e}")
+ sys.exit(1)
diff --git a/.github/scripts/download_artifact.js b/.github/scripts/download_artifact.js
new file mode 100644
index 00000000..a3fddde8
--- /dev/null
+++ b/.github/scripts/download_artifact.js
@@ -0,0 +1,33 @@
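+// Runs in the privileged comment workflow (workflow_run): locates the 'benchmark-results' artifact from the triggering PR run and saves it as benchmark-results.zip in the workspace.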
+module.exports = async ({github, context, core}) => {
+ try {
+ const artifacts = await github.rest.actions.listWorkflowRunArtifacts({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ run_id: context.payload.workflow_run.id,
+ });
+
+ const matchArtifact = artifacts.data.artifacts.find((artifact) => {
+ return artifact.name === "benchmark-results";
+ });
+
+ if (!matchArtifact) {
+ core.setFailed("No artifact named 'benchmark-results' found.");
+ return;
+ }
+
+ const download = await github.rest.actions.downloadArtifact({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ artifact_id: matchArtifact.id,
+ archive_format: 'zip',
+ });
+
+ const fs = require('fs');
+ const path = require('path');
+ const workspace = process.env.GITHUB_WORKSPACE;
+ fs.writeFileSync(path.join(workspace, 'benchmark-results.zip'), Buffer.from(download.data));
+ } catch (error) {
+ core.setFailed(`Failed to download artifact: ${error.message}`);
+ }
+};
diff --git a/.github/scripts/format_google_benchmark_data.py b/.github/scripts/format_google_benchmark_data.py
new file mode 100644
index 00000000..17b129e8
--- /dev/null
+++ b/.github/scripts/format_google_benchmark_data.py
@@ -0,0 +1,107 @@
+import json
+import os
+import sys
+import datetime
+
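+# Convert Google Benchmark JSON output into the per-commit record format stored in the
+# casbin benchmark-data repository (one entry per benchmark, values normalized to ns/op).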
+def normalize_name(name):
+ # Remove prefix if exists (e.g., "BenchmarkModel/")
+ if "/" in name:
+ name = name.split("/", 1)[1]
+
+ # Clean up name similar to pycasbin
+ parts = name.split("_")
+ new_parts = []
+ for p in parts:
+ if p.lower() in ["rbac", "abac", "acl", "api", "rest"]:
+ new_parts.append(p.upper())
+ else:
+ new_parts.append(p.capitalize())
+ return "".join(new_parts)
+
+
+def main():
+ if len(sys.argv) < 3:
+ print("Usage: python format_google_benchmark_data.py input.json output.json")
+ sys.exit(1)
+
+ input_path = sys.argv[1]
+ output_path = sys.argv[2]
+
+ try:
+ with open(input_path, "r", encoding="utf-8") as f:
+ data = json.load(f)
+ except Exception as e:
+ print(f"Error loading {input_path}: {e}")
+ sys.exit(1)
+
+ # Get commit info from environment variables
+ commit_info = {
+ "author": {
+ "email": os.environ.get("COMMIT_AUTHOR_EMAIL", ""),
+ "name": os.environ.get("COMMIT_AUTHOR_NAME", ""),
+ "username": os.environ.get("COMMIT_AUTHOR_USERNAME", ""),
+ },
+ "committer": {
+ "email": os.environ.get("COMMIT_COMMITTER_EMAIL", ""),
+ "name": os.environ.get("COMMIT_COMMITTER_NAME", ""),
+ "username": os.environ.get("COMMIT_COMMITTER_USERNAME", ""),
+ },
+ "distinct": True,
+ "id": os.environ.get("COMMIT_ID", ""),
+ "message": os.environ.get("COMMIT_MESSAGE", ""),
+ "timestamp": os.environ.get("COMMIT_TIMESTAMP", ""),
+ "tree_id": os.environ.get("COMMIT_TREE_ID", ""),
+ "url": os.environ.get("COMMIT_URL", ""),
+ }
+
+ # Get CPU count
+ cpu_count = data.get("context", {}).get("num_cpus")
+ if not cpu_count:
+ cpu_count = os.cpu_count() or 1
+
+ benches = []
+ for bench in data.get("benchmarks", []):
+ # Skip aggregate items (mean, median, stddev) if any
+ if "run_type" in bench and bench["run_type"] == "aggregate":
+ continue
+
+ name = bench["name"]
+
+ # Google Benchmark outputs time in the unit specified by time_unit
+ # We want to standardize on ns/op
+ val = bench["real_time"]
+ unit = bench.get("time_unit", "ns")
+
+ if unit == "ms":
+ val *= 1e6
+ elif unit == "us":
+ val *= 1e3
+ elif unit == "s":
+ val *= 1e9
+
+ # Extra info
+ iterations = bench.get("iterations", 0)
+ extra = f"{iterations} times"
+
+ benches.append(
+ {"name": normalize_name(name), "value": round(val, 2), "unit": "ns/op", "extra": extra}
+ )
+
+ output_data = {
+ "commit": commit_info,
+ "date": int(datetime.datetime.now().timestamp() * 1000),
+ "tool": "cpp",
+ "procs": cpu_count,
+ "benches": benches,
+ }
+
+ with open(output_path, "w", encoding="utf-8") as f:
+ json.dump(output_data, f, indent=2)
+
+ print(f"Successfully formatted benchmark data to {output_path}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.github/scripts/merge_benchmarks.py b/.github/scripts/merge_benchmarks.py
new file mode 100644
index 00000000..2e10962d
--- /dev/null
+++ b/.github/scripts/merge_benchmarks.py
@@ -0,0 +1,48 @@
+import json
+import sys
+import glob
+
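+# Merge several benchmark JSON shards into one file by concatenating their 'benchmarks' lists.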
+def merge_jsons(output_file, input_files):
+ merged_data = None
+ all_benchmarks = []
+
+ for file_path in input_files:
+ try:
+ with open(file_path, 'r', encoding='utf-8') as f:
+ data = json.load(f)
+ if merged_data is None:
+ merged_data = data
+
+ # Handle Google Benchmark format
+ if 'benchmarks' in data:
+ all_benchmarks.extend(data['benchmarks'])
+ # Handle list format (legacy/JMH)
+ elif isinstance(data, list):
+ all_benchmarks.extend(data)
+ except Exception as e:
+ print(f"Warning: Failed to parse {file_path}: {e}")
+
+ if merged_data:
+ # If merged_data was a list, make it a dict
+ if isinstance(merged_data, list):
+ merged_data = {'benchmarks': []}
+
+ merged_data['benchmarks'] = all_benchmarks
+ with open(output_file, 'w', encoding='utf-8') as f:
+ json.dump(merged_data, f, indent=4)
+
+if __name__ == "__main__":
+ if len(sys.argv) < 3:
+ print("Usage: python merge_benchmarks.py output.json input1.json input2.json ...")
+ sys.exit(1)
+
+ output_file = sys.argv[1]
+ input_files = sys.argv[2:]
+
+ # Expand globs if shell didn't
+ expanded_inputs = []
+ for p in input_files:
+ expanded_inputs.extend(glob.glob(p))
+
+ merge_jsons(output_file, expanded_inputs)
diff --git a/.github/scripts/post_comment.js b/.github/scripts/post_comment.js
new file mode 100644
index 00000000..4be1f42b
--- /dev/null
+++ b/.github/scripts/post_comment.js
@@ -0,0 +1,64 @@
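+// Creates or updates a single bot comment on the PR from comparison.md; an existing comment is recognised by the 'Benchmark Comparison' marker in its body.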
+module.exports = async ({github, context, core}) => {
+ const fs = require('fs');
+
+ // Validate pr_number.txt
+ if (!fs.existsSync('pr_number.txt')) {
+ core.setFailed("Required artifact file 'pr_number.txt' was not found in the workspace.");
+ return;
+ }
+ const prNumberContent = fs.readFileSync('pr_number.txt', 'utf8').trim();
+ const issue_number = parseInt(prNumberContent, 10);
+ if (!Number.isFinite(issue_number) || issue_number <= 0) {
+ core.setFailed('Invalid PR number in pr_number.txt: "' + prNumberContent + '"');
+ return;
+ }
+
+ // Validate comparison.md
+ if (!fs.existsSync('comparison.md')) {
+ core.setFailed("Required artifact file 'comparison.md' was not found in the workspace.");
+ return;
+ }
+ let comparison;
+ try {
+ comparison = fs.readFileSync('comparison.md', 'utf8');
+ } catch (error) {
+ core.setFailed("Failed to read 'comparison.md': " + error.message);
+ return;
+ }
+
+ // Find existing comment
+ const { data: comments } = await github.rest.issues.listComments({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: issue_number,
+ });
+
+ const botComment = comments.find(comment =>
+ comment.user.type === 'Bot' &&
+ comment.body.includes('Benchmark Comparison')
+ );
+
+ const footer = 'š¤ This comment will be automatically updated with the latest benchmark results.';
+ const commentBody = `${comparison}\n\n${footer}`;
+
+ try {
+ if (botComment) {
+ await github.rest.issues.updateComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ comment_id: botComment.id,
+ body: commentBody
+ });
+ } else {
+ await github.rest.issues.createComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: issue_number,
+ body: commentBody
+ });
+ }
+ } catch (error) {
+ core.setFailed('Failed to post or update GitHub comment: ' + error.message);
+ }
+};
diff --git a/.github/scripts/pytest_benchstat.py b/.github/scripts/pytest_benchstat.py
new file mode 100644
index 00000000..254c38ec
--- /dev/null
+++ b/.github/scripts/pytest_benchstat.py
@@ -0,0 +1,234 @@
+import json
+import sys
+import math
+
+# Force UTF-8 output for Windows
+sys.stdout.reconfigure(encoding="utf-8")
+
+
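+# Print a benchstat-style comparison table (old vs new) from two benchmark JSON files.
+# Despite the name, both Google Benchmark and pytest-benchmark layouts are accepted.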
+def load_json(path):
+ try:
+ with open(path, "r", encoding="utf-8") as f:
+ return json.load(f)
+ except Exception as e:
+ print(f"Error loading {path}: {e}", file=sys.stderr)
+ return None
+
+
+def format_val(val_ns):
+ if val_ns is None:
+ return "N/A"
+ # val_ns is in nanoseconds (Google Benchmark time_unit usually ns)
+ if val_ns < 1000:
+ return f"{val_ns:.2f}ns"
+ if val_ns < 1e6:
+ return f"{val_ns/1e3:.2f}us"
+ if val_ns < 1e9:
+ return f"{val_ns/1e6:.2f}ms"
+ return f"{val_ns/1e9:.2f}s"
+
+
+def normalize_name(name):
+ # Google Benchmark names look like "Suite/Test/...".
+ # Currently a no-op; kept as a hook for future name clean-up.
+ return name
+
+
+def parse_google_benchmark(data):
+ """Parses Google Benchmark JSON data."""
+ benchmarks = data.get("benchmarks", [])
+ parsed = {}
+
+ # Group by name (handle repetitions if present)
+ grouped = {}
+
+ for b in benchmarks:
+ name = b.get("name")
+ # If using aggregates (mean, median, stddev), the name might have suffix
+ if name.endswith("_mean") or name.endswith("_median") or name.endswith("_stddev"):
+ real_name = name.rsplit("_", 1)[0]
+ metric = name.rsplit("_", 1)[1]
+ if real_name not in grouped:
+ grouped[real_name] = {"samples": []}
+ grouped[real_name][metric] = b.get("real_time") # Use real_time or cpu_time
+ continue
+
+ if name not in grouped:
+ grouped[name] = {"samples": []}
+
+ # Collect raw samples if available
+ grouped[name]["samples"].append(b.get("real_time"))
+ grouped[name]["unit"] = b.get("time_unit", "ns")
+
+ for name, data in grouped.items():
+ # Google Benchmark output is usually in time_unit (default ns)
+ # We want to normalize to ns for internal consistency with format_val
+ unit = data.get("unit", "ns")
+ mult = 1.0
+ if unit == "us": mult = 1e3
+ elif unit == "ms": mult = 1e6
+ elif unit == "s": mult = 1e9
+
+ mean = 0.0
+ stddev = 0.0
+ rounds = len(data["samples"])
+
+ if "mean" in data:
+ mean = data["mean"] * mult
+ if "stddev" in data:
+ stddev = data["stddev"] * mult
+ else:
+ stddev = 0.0
+ # Estimate rounds if aggregates only
+ if rounds == 0: rounds = 1 # We don't know exact rounds if only aggregate provided
+ elif rounds > 0:
+ # Calculate from samples
+ samples = [s * mult for s in data["samples"]]
+ mean = sum(samples) / rounds
+ if rounds > 1:
+ variance = sum((x - mean) ** 2 for x in samples) / (rounds - 1)
+ stddev = math.sqrt(variance)
+ else:
+ stddev = 0.0
+
+ parsed[name] = {
+ "mean": mean,
+ "stddev": stddev,
+ "rounds": rounds
+ }
+
+ return parsed
+
+def main():
+ if len(sys.argv) < 3:
+ print("Usage: python pytest_benchstat.py base.json pr.json")
+ sys.exit(1)
+
+ base_data = load_json(sys.argv[1])
+ pr_data = load_json(sys.argv[2])
+
+ if not base_data or not pr_data:
+ sys.exit(1)
+
+ # Detect format and parse
+ # Try Google Benchmark first (has "benchmarks" list with "real_time")
+ # Fallback to Pytest-benchmark (has "benchmarks" list with "stats")
+
+ def parse_any(data):
+ if "benchmarks" in data and len(data["benchmarks"]) > 0:
+ first = data["benchmarks"][0]
+ if "stats" in first:
+ # Pytest-benchmark
+ return {b["name"]: b["stats"] for b in data["benchmarks"]}
+ elif "real_time" in first or "cpu_time" in first:
+ # Google Benchmark
+ return parse_google_benchmark(data)
+ return {}
+
+ base_map = parse_any(base_data)
+ pr_map = parse_any(pr_data)
+
+ all_names = sorted(set(base_map.keys()) | set(pr_map.keys()))
+
+ # Print Header
+ print("goos: linux")
+ print("goarch: amd64")
+ print("pkg: github.com/casbin/casbin-cpp")
+
+ cpu_info = "GitHub Actions Runner"
+ # Try to get CPU info from JSON context if available
+ if "context" in base_data and "cpu_model" in base_data["context"]:
+ cpu_info = base_data["context"]["cpu_model"]
+
+ print(f"cpu: {cpu_info}")
+ print("")
+
+ w_name = 50
+ w_val = 20
+
+ # Header
+ print(f"{'':<{w_name}}ā old base.json ā new pr.json ā")
+ print(f"{'':<{w_name}}ā sec/op ā sec/op ā")
+
+ base_means = []
+ pr_means = []
+
+ # Footnote tracking
+ need_low_sample_note = False
+ need_insignificant_note = False
+
+ for name in all_names:
+ base = base_map.get(name)
+ pr = pr_map.get(name)
+
+ base_mean = base["mean"] if base else 0
+ pr_mean = pr["mean"] if pr else 0
+
+ base_std = base["stddev"] if base else 0
+ pr_std = pr["stddev"] if pr else 0
+
+ base_rounds = base["rounds"] if base else 0
+ pr_rounds = pr["rounds"] if pr else 0
+
+ if base_mean > 0:
+ base_means.append(base_mean)
+ if pr_mean > 0:
+ pr_means.append(pr_mean)
+
+ # Format Value with StdDev and Superscript
+ def format_cell(val, std, rounds):
+ nonlocal need_low_sample_note
+ if val == 0:
+ return "N/A"
+
+ # StdDev formatting
+ if rounds < 2 or std == 0:
+ std_str = "± ā"
+ else:
+ pct = (std / val) * 100
+ std_str = f"± {pct:.0f}%"
+
+ # Superscript marker for low sample size
+ note = ""
+ if rounds < 6:
+ note = "¹"
+ need_low_sample_note = True
+
+ return f"{format_val(val)} {std_str} {note}"
+
+ base_str = format_cell(base_mean, base_std, base_rounds) if base else "N/A"
+ pr_str = format_cell(pr_mean, pr_std, pr_rounds) if pr else "N/A"
+
+ display_name = normalize_name(name)
+
+ print(f"{display_name:<{w_name}} {base_str:<{w_val}} {pr_str:<{w_val}}")
+
+ if base_means and pr_means:
+ # Filter out zero values for geomean calculation
+ base_geo_input = [x for x in base_means if x > 0]
+ pr_geo_input = [x for x in pr_means if x > 0]
+
+ g_base_str = "N/A"
+ g_pr_str = "N/A"
+
+ if base_geo_input:
+ g_base = math.exp(sum(math.log(x) for x in base_geo_input) / len(base_geo_input))
+ g_base_str = f"{format_val(g_base)}"
+
+ if pr_geo_input:
+ g_pr = math.exp(sum(math.log(x) for x in pr_geo_input) / len(pr_geo_input))
+ g_pr_str = f"{format_val(g_pr)}"
+
+ print(f"{'geomean':<{w_name}} {g_base_str:<{w_val}} {g_pr_str:<{w_val}}")
+
+ # Print Footnotes
+ if need_low_sample_note:
+ print("¹ need >= 6 samples for confidence interval at level 0.95")
+ if need_insignificant_note:
+ print("² need >= 4 samples to detect a difference at alpha level 0.05")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.github/workflows/benchmark-push.yml b/.github/workflows/benchmark-push.yml
new file mode 100644
index 00000000..ffce645d
--- /dev/null
+++ b/.github/workflows/benchmark-push.yml
@@ -0,0 +1,96 @@
+# Copyright 2021 The casbin Authors. All Rights Reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+name: Push Benchmark Data
+
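+# Runs the benchmark suite on every push to master and commits the formatted result
+# to the casbin/casbin-benchmark-data repository.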
+on:
+ push:
+ branches:
+ - master
+
+jobs:
+ benchmark:
+ name: Run Benchmark & Push Data
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ id: checkout
+ uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+
+ - name: Setup CMake
+ uses: jwlawson/actions-setup-cmake@v2
+ with:
+ cmake-version: '3.25.x'
+ - name: Configuring CMake files
+ id: building-files
+ run: |
+ mkdir build && cd build && cmake .. -DCMAKE_BUILD_TYPE:STRING=Release
+ - name: Building library
+ id: building-lib
+ run: |
+ cd build && cmake --build . --config Release --target all -j 10 --
+ - name: Run Benchmark
+ id: run-benchmark
+ run: |
+ cd ./build/tests/benchmarks
+ ./casbin_benchmark --benchmark_format=json > benchmark_result.json
+ mv benchmark_result.json ../../../
+
+ - name: Checkout Data Repo
+ uses: actions/checkout@v4
+ with:
+ repository: casbin/casbin-benchmark-data
+ token: ${{ secrets.CASBIN_BENCHMARK_DATA_TOKEN }}
+ path: benchmark-data
+
+ - name: Format Benchmark Data
+ env:
+ COMMIT_AUTHOR_EMAIL: ${{ github.event.head_commit.author.email }}
+ COMMIT_AUTHOR_NAME: ${{ github.event.head_commit.author.name }}
+ COMMIT_AUTHOR_USERNAME: ${{ github.event.head_commit.author.username }}
+ COMMIT_COMMITTER_EMAIL: ${{ github.event.head_commit.committer.email }}
+ COMMIT_COMMITTER_NAME: ${{ github.event.head_commit.committer.name }}
+ COMMIT_COMMITTER_USERNAME: ${{ github.event.head_commit.committer.username }}
+ COMMIT_MESSAGE: ${{ github.event.head_commit.message }}
+ COMMIT_TIMESTAMP: ${{ github.event.head_commit.timestamp }}
+ COMMIT_URL: ${{ github.event.head_commit.url }}
+ COMMIT_ID: ${{ github.event.head_commit.id }}
+ run: |
+ python .github/scripts/format_google_benchmark_data.py benchmark_result.json formatted_result.json
+
+ - name: Push Benchmark Result
+ working-directory: benchmark-data
+ run: |
+ mkdir -p casbin-cpp
+ cp ../formatted_result.json casbin-cpp/benchmark-${{ github.sha }}.json
+
+ git config user.name "github-actions[bot]"
+ git config user.email "github-actions[bot]@users.noreply.github.com"
+
+ git add casbin-cpp/benchmark-${{ github.sha }}.json
+ git commit -m "Add benchmark result for casbin-cpp commit ${{ github.sha }}"
+ git push
+
+ - name: Cleanup
+ id: clean-up
+ if: always()
+ run: |
+ rm -rf build benchmark-data benchmark_result.json formatted_result.json
diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
deleted file mode 100644
index 367b35d9..00000000
--- a/.github/workflows/benchmark.yml
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2021 The casbin Authors. All Rights Reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-name: Benchmark
-
-on:
- push:
- branches:
- - master
- pull_request:
-
-jobs:
- benchmark:
- name: Benchmark
- runs-on: ubuntu-latest
- steps:
- - name: Checkout
- id: checkout
- uses: actions/checkout@v4
- - name: Setup CMake
- uses: jwlawson/actions-setup-cmake@v2
- with:
- cmake-version: '3.25.x'
- - name: Configuring CMake files
- id: building-files
- run: |
- mkdir build && cd build && cmake .. -DCMAKE_BUILD_TYPE:STRING=Release
- - name: Building library
- id: building-lib
- run: |
- cd build && cmake --build . --config Release --target all -j 10 --
- - name: Run Benchmark
- id: run-benchmark
- run: |
- cd ./build/tests/benchmarks
- ./casbin_benchmark
- - name: Cleanup
- id: clean-up
- run: |
- rm -r build
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 35c0fde9..1f624947 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -28,6 +28,11 @@ jobs:
- name: Checkout
id: checkout
uses: actions/checkout@v4
+ - name: Install dependencies (act workaround)
+ if: ${{ env.ACT }}
+ run: |
+ sudo apt-get update || true
+ sudo apt-get install -y python3-dev
- name: Setup CMake
uses: jwlawson/actions-setup-cmake@v2
with:
diff --git a/.github/workflows/comment.yml b/.github/workflows/comment.yml
new file mode 100644
index 00000000..feb02e33
--- /dev/null
+++ b/.github/workflows/comment.yml
@@ -0,0 +1,40 @@
+name: Post Benchmark Comment
+
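+# This workflow runs in the base repository with write permissions and posts the
+# benchmark comment. The pull_request workflow only uploads artifacts, so code from
+# forked PRs never runs with an elevated token.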
+on:
+ workflow_run:
+ workflows: ["Performance Comparison for Pull Requests"]
+ types:
+ - completed
+
+permissions:
+ pull-requests: write
+
+jobs:
+ comment:
+ runs-on: ubuntu-latest
+ if: >
+ github.event.workflow_run.event == 'pull_request' &&
+ github.event.workflow_run.conclusion == 'success'
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v4
+
+ - name: 'Download artifact'
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const script = require('./.github/scripts/download_artifact.js')
+ await script({github, context, core})
+
+ - name: 'Unzip artifact'
+ run: unzip benchmark-results.zip
+
+ - name: 'Post comment'
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const script = require('./.github/scripts/post_comment.js')
+ await script({github, context, core})
diff --git a/.github/workflows/performance-pr.yml b/.github/workflows/performance-pr.yml
new file mode 100644
index 00000000..c6404e38
--- /dev/null
+++ b/.github/workflows/performance-pr.yml
@@ -0,0 +1,164 @@
+name: Performance Comparison for Pull Requests
+
+on:
+ pull_request:
+ branches: [master]
+
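+# Builds and benchmarks both the PR head and its base commit, renders a benchstat-style
+# comparison, and uploads it as an artifact for the privileged comment workflow.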
+jobs:
+ benchmark-pr:
+ name: Run Benchmark
+ runs-on: ubuntu-latest
+ steps:
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y build-essential cmake git python3 jq wget dos2unix
+
+ - name: Checkout PR branch
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.event.pull_request.head.sha }}
+
+ - name: Pre-fetch dependencies
+ run: |
+ # json
+ wget https://github.com/nlohmann/json/archive/refs/tags/v3.11.2.tar.gz -O json.tar.gz
+ mkdir -p json-src
+ tar -xzf json.tar.gz -C json-src --strip-components=1
+ echo "JSON_SRC=$(pwd)/json-src" >> $GITHUB_ENV
+
+ # googletest
+ wget https://github.com/google/googletest/archive/refs/tags/v1.14.0.tar.gz -O gtest.tar.gz
+ mkdir -p gtest-src
+ tar -xzf gtest.tar.gz -C gtest-src --strip-components=1
+ echo "GTEST_SRC=$(pwd)/gtest-src" >> $GITHUB_ENV
+
+ # benchmark
+ wget https://github.com/google/benchmark/archive/refs/tags/v1.8.3.tar.gz -O bench.tar.gz
+ mkdir -p bench-src
+ tar -xzf bench.tar.gz -C bench-src --strip-components=1
+ echo "BENCH_SRC=$(pwd)/bench-src" >> $GITHUB_ENV
+
+ - name: Build and Run benchmark on PR branch
+ run: |
+ find . -name "*.conf" -exec dos2unix {} +
+ find . -name "*.csv" -exec dos2unix {} +
+
+ cmake -B build-pr -S . \
+ -DCASBIN_BUILD_BENCHMARK=ON \
+ -DCASBIN_BUILD_TEST=ON \
+ -DCASBIN_BUILD_PYTHON_BINDINGS=OFF \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DFETCHCONTENT_SOURCE_DIR_JSON=$JSON_SRC \
+ -DFETCHCONTENT_SOURCE_DIR_GOOGLETEST=$GTEST_SRC \
+ -DFETCHCONTENT_SOURCE_DIR_BENCHMARK=$BENCH_SRC
+
+ cmake --build build-pr --target casbin_benchmark -j2
+ ./build-pr/tests/benchmarks/casbin_benchmark --benchmark_format=json > pr-bench.json
+
+ - name: Checkout base branch
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.event.pull_request.base.sha }}
+ path: base
+
+ - name: Build and Run benchmark on base branch
+ run: |
+ cd base
+ find . -name "*.conf" -exec dos2unix {} +
+ find . -name "*.csv" -exec dos2unix {} +
+
+ cmake -B build-base -S . \
+ -DCASBIN_BUILD_BENCHMARK=ON \
+ -DCASBIN_BUILD_TEST=ON \
+ -DCASBIN_BUILD_PYTHON_BINDINGS=OFF \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DFETCHCONTENT_SOURCE_DIR_JSON=$JSON_SRC \
+ -DFETCHCONTENT_SOURCE_DIR_GOOGLETEST=$GTEST_SRC \
+ -DFETCHCONTENT_SOURCE_DIR_BENCHMARK=$BENCH_SRC
+
+ cmake --build build-base --target casbin_benchmark -j2
+ ./build-base/tests/benchmarks/casbin_benchmark --benchmark_format=json > ../base-bench.json
+
+ # Collect results in a subfolder before uploading the artifact
+ - name: Move results to subfolder
+ run: |
+ mkdir -p casbin-cpp
+ mv pr-bench.json casbin-cpp/
+ mv base-bench.json casbin-cpp/
+
+ - name: Upload benchmark shard
+ uses: actions/upload-artifact@v4
+ with:
+ name: benchmark-shard-main
+ path: casbin-cpp/*.json
+
+ report:
+ name: Generate Report
+ needs: benchmark-pr
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+
+ - name: Download all benchmark shards
+ uses: actions/download-artifact@v4
+ with:
+ pattern: benchmark-shard-*
+ merge-multiple: true
+ path: benchmark_data/casbin-cpp
+
+ - name: Merge Benchmark Results
+ run: |
+ python .github/scripts/merge_benchmarks.py base.json benchmark_data/casbin-cpp/base-*.json
+ python .github/scripts/merge_benchmarks.py pr.json benchmark_data/casbin-cpp/pr-*.json
+
+ - name: Save commit info
+ id: commits
+ run: |
+ BASE_SHA="${{ github.event.pull_request.base.sha }}"
+ HEAD_SHA="${{ github.event.pull_request.head.sha }}"
+ echo "base_short=${BASE_SHA:0:7}" >> $GITHUB_OUTPUT
+ echo "head_short=${HEAD_SHA:0:7}" >> $GITHUB_OUTPUT
+
+ - name: Compare benchmarks
+ id: benchstat
+ run: |
+ cat > comparison.md << 'EOF'
+ ## Benchmark Comparison
+
+ Comparing base branch (`${{ steps.commits.outputs.base_short }}`)
+ vs PR branch (`${{ steps.commits.outputs.head_short }}`)
+
+ ```
+ EOF
+ python .github/scripts/pytest_benchstat.py base.json pr.json >> comparison.md || true
+ echo '```' >> comparison.md
+
+ # Post-process
+ python .github/scripts/benchmark_formatter.py
+
+ - name: Save PR number
+ run: |
+ PR_NUMBER="${{ github.event.pull_request.number }}"
+ if [ -z "$PR_NUMBER" ]; then
+ echo "Error: Pull request number is not available in event payload." >&2
+ exit 1
+ fi
+ echo "$PR_NUMBER" > pr_number.txt
+
+ - name: Upload benchmark results
+ uses: actions/upload-artifact@v4
+ with:
+ name: benchmark-results
+ path: |
+ comparison.md
+ pr_number.txt
+
diff --git a/casbin/config/config.cpp b/casbin/config/config.cpp
index 7c4b9495..acddbba2 100644
--- a/casbin/config/config.cpp
+++ b/casbin/config/config.cpp
@@ -68,6 +68,8 @@ void Config::ParseBuffer(std::istream* buf) {
} else
break;
line = Trim(line);
+ if (line.empty())
+ continue;
if (line.find(DEFAULT_COMMENT) == 0)
continue;
else if (line.find(DEFAULT_COMMENT_SEM) == 0)
diff --git a/tests/config_test.cpp b/tests/config_test.cpp
index 0eff9011..cb4e22a4 100644
--- a/tests/config_test.cpp
+++ b/tests/config_test.cpp
@@ -19,9 +19,11 @@
#include
#include
+#include "config_path.h"
+
namespace {
-std::shared_ptr<casbin::Config> GetTestConfig() { return casbin::Config::NewConfig("../../casbin/config/testdata/testini.ini"); }
+std::shared_ptr<casbin::Config> GetTestConfig() { return casbin::Config::NewConfig(relative_path + "/casbin/config/testdata/testini.ini"); }
TEST(TestConfig, TestDebug) {
auto config = GetTestConfig();