18 changes: 18 additions & 0 deletions torchci/components/benchmark_v3/configs/configurations.tsx
@@ -31,6 +31,10 @@ import {
PYTORCH_VLLM_BENCHMARK_ID,
PytorchVllmBenchmarkDashoboardConfig,
} from "./teams/vllm/config";
import {
PYTORCH_X_VLLM_BENCHMARK_ID,
PytorchXVllmBenchmarkDashboardConfig,
} from "./teams/vllm/pytorch_x_vllm_config";

export const REPORT_ID_TO_BENCHMARK_ID_MAPPING: Record<string, string> = {
compiler_regression: "compiler_inductor",
@@ -40,6 +44,9 @@ export const PREDEFINED_BENCHMARK_CONFIG: BenchmarkConfigMap = {
[COMPILTER_BENCHMARK_NAME]: {
[BenchmarkPageType.DashboardPage]: CompilerDashboardBenchmarkUIConfig,
},
[PYTORCH_X_VLLM_BENCHMARK_ID]: {
[BenchmarkPageType.DashboardPage]: PytorchXVllmBenchmarkDashboardConfig,
},
[COMPILTER_PRECOMPUTE_BENCHMARK_ID]: {
[BenchmarkPageType.AggregatePage]: CompilerPrecomputeBenchmarkUIConfig,
},
@@ -69,6 +76,11 @@ export const BENCHMARK_ID_MAPPING: Record<string, BenchmarkIdMappingItem> = {
repoName: "pytorch/pytorch",
benchmarkName: "compiler_inductor",
},
[PYTORCH_X_VLLM_BENCHMARK_ID]: {
id: PYTORCH_X_VLLM_BENCHMARK_ID,
repoName: "pytorch/pytorch",
benchmarkName: "PyTorch x vLLM benchmark",
},
[COMPILTER_PRECOMPUTE_BENCHMARK_ID]: {
id: COMPILTER_PRECOMPUTE_BENCHMARK_ID,
repoName: "pytorch/pytorch",
@@ -151,6 +163,12 @@ export const BENCHMARK_CATEGORIES: BenchmarkCategoryGroup[] = [
},
],
},
{
name: "PyTorch x vLLM Benchmark",
route: `/benchmark/v3/dashboard/${PYTORCH_X_VLLM_BENCHMARK_ID}`,
info: "PyTorch x vLLM nightly benchmark. Powered by [code](TODO) and [benchmark configs](https://github.com/pytorch/pytorch-integration-testing/tree/main/vllm-benchmarks/benchmarks)",
description: "Pytorch x vLLM nightly benchmark using vllm bench",
@yangw-dev (Contributor), Dec 22, 2025:
@huydhn recommend adding a comment noting that this is generated based on the pinned vLLM commit.

@yangw-dev (Contributor), Dec 22, 2025:
Also, maybe put this under the vLLM section ("vLLM Benchmarks") instead of PyTorch, and maybe also update the description for the vLLM x pinned-PyTorch benchmark, so that we can tell the difference.
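If both suggestions are taken, the category entry might look roughly like the sketch below. This is a sketch only: it assumes the entry moves under the vLLM group and that the description calls out the pinned vLLM commit, both of which are still open in this thread.

```tsx
// Sketch of the suggested change, not what this PR currently does: keep the
// route and links, but note the pinned vLLM commit and (per the review) host
// the entry under the "vLLM Benchmarks" group instead of the PyTorch group.
{
  name: "PyTorch x vLLM Benchmark",
  route: `/benchmark/v3/dashboard/${PYTORCH_X_VLLM_BENCHMARK_ID}`,
  info: "PyTorch x vLLM nightly benchmark. Powered by [code](TODO) and [benchmark configs](https://github.com/pytorch/pytorch-integration-testing/tree/main/vllm-benchmarks/benchmarks)",
  description:
    "PyTorch x vLLM nightly benchmark using vllm bench, generated against a pinned vLLM commit",
},
```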

},
{
name: "Gpt-fast Benchmark",
route: `/benchmark/v3/dashboard/${PYTORCH_GPTFAST_BENCHMARK_ID}`,
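For orientation, here is a minimal sketch of how these registries are presumably consumed when a dashboard route is opened. The helper name below is hypothetical; the real lookup code is not part of this diff.

```tsx
// Hypothetical helper, for illustration only: resolve the page UI config and
// repo/benchmark mapping for a benchmark id registered above.
function resolveDashboard(benchmarkId: string) {
  const uiConfig =
    PREDEFINED_BENCHMARK_CONFIG[benchmarkId]?.[BenchmarkPageType.DashboardPage];
  const mapping = BENCHMARK_ID_MAPPING[benchmarkId];
  return { uiConfig, mapping };
}

// For the new id this yields PytorchXVllmBenchmarkDashboardConfig plus
// { repoName: "pytorch/pytorch", benchmarkName: "PyTorch x vLLM benchmark" }.
resolveDashboard(PYTORCH_X_VLLM_BENCHMARK_ID);
```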
146 changes: 146 additions & 0 deletions torchci/components/benchmark_v3/configs/teams/vllm/pytorch_x_vllm_config.tsx
@@ -0,0 +1,146 @@
import { BenchmarkUIConfig } from "../../config_book_types";
import {
BRANCH_METADATA_COLUMN,
DEFAULT_DASHBOARD_BENCHMARK_INITIAL,
} from "../defaults/default_dashboard_config";

export const PYTORCH_X_VLLM_BENCHMARK_ID = "pytorch_x_vllm_benchmark";

const COMPARISON_TABLE_METADATA_COLUMNS = [
{
field: "device",
displayName: "Hardware type",
},
{
field: "arch",
displayName: "Hardware model",
},
{
field: "extra_key.use_compile",
displayName: "Use Compile",
},
{
field: "extra_key.request_rate",
displayName: "Request Rate",
},
{
field: "extra_key.tensor_parallel_size",
displayName: "Tensor Parallel",
},
{
field: "extra_key.input_len",
displayName: "Input Len",
},
{
field: "extra_key.output_len",
displayName: "Max Output Len",
},
] as const;

export const PytorchXVllmBenchmarkDashboardConfig: BenchmarkUIConfig = {
benchmarkId: PYTORCH_X_VLLM_BENCHMARK_ID,
apiId: PYTORCH_X_VLLM_BENCHMARK_ID,
title: "PyTorch x vLLM Benchmark",
type: "dashboard",
dataBinding: {
initial: {
...DEFAULT_DASHBOARD_BENCHMARK_INITIAL,
benchmarkId: PYTORCH_X_VLLM_BENCHMARK_ID,
},
required_filter_fields: [],
},
dataRender: {
type: "auto",
subSectionRenders: {
detail_view: {
filterConstraint: {
model: {
disabled: true,
},
deviceName: {
disableOptions: [""],
},
mode: {
disableOptions: [""],
},
},
renders: [
{
type: "AutoBenchmarkTimeSeriesChartGroup",
title: "Metrics Time Series Chart Detail View",
config: {
type: "line",
groupByFields: ["metric"],
lineKey: [
"model",
"extra_key.use_compile",
"extra_key.request_rate",
"extra_key.input_len",
"extra_key.output_len",
"metric",
"branch",
],
chart: {
renderOptions: {
showLegendDetails: true,
},
},
},
},
{
type: "AutoBenchmarkTimeSeriesTable",
title: "Comparison Table Detail View",
config: {
primary: {
fields: ["model"],
displayName: "Model",
},
extraMetadata: COMPARISON_TABLE_METADATA_COLUMNS,
renderOptions: {
missingText: "",
flex: {
primary: 2,
},
},
},
},
{
type: "AutoBenchmarkRawDataTable",
title: "Raw Data Table",
config: {
extraMetadata: [
BRANCH_METADATA_COLUMN,
...COMPARISON_TABLE_METADATA_COLUMNS,
],
},
},
],
},
},
renders: [
Contributor:
@huydhn I will add another auto component to add a plain description for the dashboard; it's good if we tell the difference between this benchmark, based on a pinned commit, and the other one, based on pinned PyTorch.

Contributor:
AutoBenchmarkMarkDownContent
#7615
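A minimal sketch of what such a description render might look like, assuming the AutoBenchmarkMarkDownContent component from #7615 takes a markdown string in its config; both the config field name and the wording below are assumptions, since the actual contract is defined in that PR.

```tsx
// Hypothetical sketch only: #7615 defines the real AutoBenchmarkMarkDownContent
// config shape, and the markdown wording below is an assumed description.
{
  type: "AutoBenchmarkMarkDownContent",
  title: "About this benchmark",
  config: {
    // Assumed field name for the markdown body.
    content:
      "PyTorch x vLLM nightly benchmark: runs `vllm bench` against nightly " +
      "PyTorch with a pinned vLLM commit. The vLLM Benchmarks dashboard covers " +
      "the complementary setup (pinned PyTorch, moving vLLM).",
  },
},
```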

{
type: "AutoBenchmarkPairwiseTable",
title: "Comparison Table",
config: {
primary: {
fields: ["model"],
displayName: "Model",
navigation: {
type: "subSectionRender",
value: "detail_view",
applyFilterFields: ["model", "device", "arch"],
},
},
extraMetadata: COMPARISON_TABLE_METADATA_COLUMNS,
renderOptions: {
missingText: "none",
bothMissingText: "",
flex: {
primary: 2,
},
},
},
},
],
},
};
@@ -26,6 +26,7 @@ const dataCtors: Record<string, new () => BenchmarkDataFetcher> = {
pytorch_helion: PytorchHelionDataFetcher,
torchao_micro_api_benchmark: PytorchAoMicroApiBenchmarkDataFetcher,
vllm_benchmark: VllmBenchmarkDataFetcher,
pytorch_x_vllm_benchmark: VllmBenchmarkDataFetcher,
default: BenchmarkDataQuery,
};

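Reusing VllmBenchmarkDataFetcher for the new id means no new fetching logic is needed. Below is a minimal sketch of how a registry like dataCtors is presumably consumed; the helper is hypothetical and not part of this diff.

```tsx
// Hypothetical helper: pick the registered fetcher for a benchmark id,
// falling back to the default constructor when the id is not registered.
function createFetcher(benchmarkId: string): BenchmarkDataFetcher {
  const Ctor = dataCtors[benchmarkId] ?? dataCtors["default"];
  return new Ctor();
}

// "pytorch_x_vllm_benchmark" resolves to VllmBenchmarkDataFetcher, so the new
// dashboard reuses the existing vLLM data-fetching path.
const fetcher = createFetcher("pytorch_x_vllm_benchmark");
```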