diff --git a/third_party/buildbuddy/proto/api/v1/common.proto b/third_party/buildbuddy/proto/api/v1/common.proto
new file mode 100644
index 000000000..9dedafa16
--- /dev/null
+++ b/third_party/buildbuddy/proto/api/v1/common.proto
@@ -0,0 +1,112 @@
+syntax = "proto3";
+
+package api.v1;
+
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+
+enum Status {
+  // The implicit default enum value. Should never be set.
+  STATUS_UNSPECIFIED = 0;
+
+  // Displays as "Building". Means the target is compiling, linking, etc.
+  BUILDING = 1;
+
+  // Displays as "Built". Means the target was built successfully.
+  // If testing was requested, it should never reach this status: it should go
+  // straight from BUILDING to TESTING.
+  BUILT = 2;
+
+  // Displays as "Broken". Means build failure such as compile error.
+  FAILED_TO_BUILD = 3;
+
+  // Displays as "Testing". Means the test is running.
+  TESTING = 4;
+
+  // Displays as "Passed". Means the test was run and passed.
+  PASSED = 5;
+
+  // Displays as "Failed". Means the test was run and failed.
+  FAILED = 6;
+
+  // Displays as "Timed out". Means the test didn't finish in time.
+  TIMED_OUT = 7;
+
+  // Displays as "Cancelled". Means the build or test was cancelled.
+  // E.g. User hit control-C.
+  CANCELLED = 8;
+
+  // Displays as "Tool Failed". Means the build or test had an internal tool
+  // failure.
+  TOOL_FAILED = 9;
+
+  // Displays as "Incomplete". Means the build or test did not complete. This
+  // might happen when a build breakage or test failure causes the tool to stop
+  // trying to build anything more or run any more tests, with the default
+  // bazel --nokeep_going option or the --notest_keep_going option.
+  INCOMPLETE = 10;
+
+  // Displays as "Flaky". Means the aggregate status contains some runs that
+  // were successful, and some that were not.
+  FLAKY = 11;
+
+  // Displays as "Unknown". Means the tool uploading to the server died
+  // mid-upload or does not know the state.
+  UNKNOWN = 12;
+
+  // Displays as "Skipped". Means building and testing were skipped.
+  // (E.g. Restricted to a different configuration.)
+  SKIPPED = 13;
+}
+
+// These correspond to the suffix of the rule name. E.g. cc_test has type TEST.
+enum TargetType {
+  // Unspecified by the build system.
+  TARGET_TYPE_UNSPECIFIED = 0;
+
+  // An application e.g. ios_application.
+  APPLICATION = 1;
+
+  // A binary target e.g. cc_binary.
+  BINARY = 2;
+
+  // A library target e.g. java_library.
+  LIBRARY = 3;
+
+  // A package.
+  PACKAGE = 4;
+
+  // Any test target; in bazel that means a rule with a '_test' suffix.
+  TEST = 5;
+}
+
+// Indicates how big the user indicated the test action was.
+enum TestSize {
+  // Unspecified by the user.
+  TEST_SIZE_UNSPECIFIED = 0;
+
+  // Unit tests taking less than 1 minute.
+  SMALL = 1;
+
+  // Integration tests taking less than 5 minutes.
+  MEDIUM = 2;
+
+  // End-to-end tests taking less than 15 minutes.
+  LARGE = 3;
+
+  // Even bigger than LARGE.
+  ENORMOUS = 4;
+
+  // Something that doesn't fit into the above categories.
+  OTHER_SIZE = 5;
+}
+
+// The timing of a particular Invocation, Action, etc. The start_time is
+// specified; the stop time can be calculated by adding duration to start_time.
+message Timing {
+  // The time the resource started running. This is in UTC Epoch time.
+  google.protobuf.Timestamp start_time = 1;
+
+  // The duration for which the resource ran.
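(Aside: as the `Timing` comment above notes, only start_time and duration are stored. A minimal Go sketch of deriving the stop time, using the protobuf well-known-type helpers with made-up values rather than any generated `Timing` stubs:)

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Stand-ins for Timing.start_time and Timing.duration.
	start := timestamppb.New(time.Date(2023, 1, 2, 3, 4, 5, 0, time.UTC))
	duration := durationpb.New(90 * time.Second)

	// The stop time is not stored; it is start_time + duration.
	stop := start.AsTime().Add(duration.AsDuration())
	fmt.Println(stop) // 2023-01-02 03:05:35 +0000 UTC
}
```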
+  google.protobuf.Duration duration = 2;
+}
diff --git a/third_party/buildbuddy/proto/api_key.proto b/third_party/buildbuddy/proto/api_key.proto
new file mode 100644
index 000000000..522ed2f69
--- /dev/null
+++ b/third_party/buildbuddy/proto/api_key.proto
@@ -0,0 +1,152 @@
+syntax = "proto3";
+
+import "third_party/buildbuddy/proto/context.proto";
+
+package api_key;
+
+// An API key used to access BuildBuddy.
+message ApiKey {
+  // The unique ID of this API key.
+  // ex: "AK123456789"
+  string id = 1;
+
+  // The string value of this API key, which is passed in API requests.
+  string value = 2;
+
+  // Optional. The user-specified label of this API key that helps them
+  // remember what it's for.
+  string label = 3;
+
+  // A capability associated with an API key.
+  //
+  // Values are powers of 2 so that bitmask operations can be used
+  // to check capabilities.
+  enum Capability {
+    UNKNOWN_CAPABILITY = 0;
+    // Allows writing to the content-addressable store and action cache.
+    CACHE_WRITE_CAPABILITY = 1;  // 2^0
+    // Allows registering an executor with the scheduler.
+    REGISTER_EXECUTOR_CAPABILITY = 2;  // 2^1
+    // Allows writing to the content-addressable store only.
+    CAS_WRITE_CAPABILITY = 4;  // 2^2
+    // Allows changing org-level settings and managing org users, such as via
+    // the settings UI or via the SCIM API.
+    ORG_ADMIN_CAPABILITY = 8;  // 2^3
+  }
+
+  // Capabilities associated with this API key.
+  repeated Capability capability = 4;
+
+  // True if this API key is visible to developers.
+  bool visible_to_developers = 5;
+
+  // True if this is a user-owned key.
+  bool user_owned = 6;
+
+  // Optional time after which this API key is no longer valid.
+  int64 expiry_usec = 7;
+}
+
+message CreateApiKeyRequest {
+  context.RequestContext request_context = 1;
+
+  // The ID of the group to create the API key for.
+  // ex: "GR123456789"
+  string group_id = 2;
+
+  // Optional. The user-specified label of this API key that helps them
+  // remember what it's for.
+  string label = 3;
+
+  // Optional. Capabilities granted to this API key.
+  repeated ApiKey.Capability capability = 4;
+
+  // True if this API key should be visible to developers.
+  bool visible_to_developers = 5;
+}
+
+message CreateApiKeyResponse {
+  context.ResponseContext response_context = 1;
+
+  // The API key that was created.
+  ApiKey api_key = 2;
+}
+
+message GetApiKeysRequest {
+  context.RequestContext request_context = 1;
+
+  // The ID of the group to get API keys for.
+  // ex: "GR123456789"
+  string group_id = 2;
+}
+
+message GetApiKeysResponse {
+  context.ResponseContext response_context = 1;
+
+  // The API keys owned by the requested group.
+  repeated ApiKey api_key = 2;
+}
+
+message GetApiKeyRequest {
+  context.RequestContext request_context = 1;
+
+  // The ID of the API key to retrieve.
+  // ex: "AK123456789"
+  string api_key_id = 2;
+}
+
+message GetApiKeyResponse {
+  context.ResponseContext response_context = 1;
+
+  ApiKey api_key = 2;
+}
+
+message UpdateApiKeyRequest {
+  context.RequestContext request_context = 1;
+
+  // The unique ID of the API key to be updated.
+  // ex: "AK123456789"
+  string id = 2;
+
+  // Optional. The user-specified label of this API key that helps them
+  // remember what it's for.
+  //
+  // NOTE: If this is empty, the label will be deleted.
+  string label = 3;
+
+  // Optional. The capabilities associated with this API key.
+  //
+  // NOTE: If this is empty, all capabilities will be removed as part of
+  // this update.
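(Aside: the power-of-two `Capability` values above are meant for bitmask checks. An illustrative Go sketch; the constants simply mirror the enum values and are not generated code:)

```go
package main

import "fmt"

const (
	CacheWrite       = 1 << 0 // CACHE_WRITE_CAPABILITY
	RegisterExecutor = 1 << 1 // REGISTER_EXECUTOR_CAPABILITY
	CASWrite         = 1 << 2 // CAS_WRITE_CAPABILITY
	OrgAdmin         = 1 << 3 // ORG_ADMIN_CAPABILITY
)

func main() {
	// Fold a key's repeated `capability` values into a single mask...
	mask := CacheWrite | CASWrite

	// ...then test membership with a bitwise AND.
	fmt.Println(mask&CASWrite != 0)         // true
	fmt.Println(mask&RegisterExecutor != 0) // false
}
```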
+  repeated ApiKey.Capability capability = 4;
+
+  // True if this API key should be visible to developers.
+  bool visible_to_developers = 5;
+}
+
+message UpdateApiKeyResponse {
+  context.ResponseContext response_context = 1;
+}
+
+message DeleteApiKeyRequest {
+  context.RequestContext request_context = 1;
+
+  // The unique ID of the API key to be deleted.
+  // ex: "AK123456789"
+  string id = 2;
+}
+
+message DeleteApiKeyResponse {
+  context.ResponseContext response_context = 1;
+}
+
+message CreateImpersonationApiKeyRequest {
+  context.RequestContext request_context = 1;
+}
+
+message CreateImpersonationApiKeyResponse {
+  context.ResponseContext response_context = 1;
+
+  // The API key that was created.
+  ApiKey api_key = 2;
+}
diff --git a/third_party/buildbuddy/proto/cache.proto b/third_party/buildbuddy/proto/cache.proto
index 43f6fcf7a..929dcbd55 100644
--- a/third_party/buildbuddy/proto/cache.proto
+++ b/third_party/buildbuddy/proto/cache.proto
@@ -2,7 +2,15 @@ syntax = "proto3";
 
 package cache;
 
-// Next Tag: 14
+import "google/protobuf/duration.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+import "third_party/buildbuddy/proto/context.proto";
+import "third_party/buildbuddy/proto/remote_execution.proto";
+import "third_party/buildbuddy/proto/resource.proto";
+
+// Next Tag: 17
 message CacheStats {
   // Server-side Action-cache stats.
   int64 action_cache_hits = 1;
@@ -14,10 +22,23 @@ message CacheStats {
   int64 cas_cache_misses = 5;
   int64 cas_cache_uploads = 6;
 
-  // Do not use these numbers to compute throughput, they are the sum total of
-  // many concurrent uploads.
+  // NOTE: Do not use size / time fields to compute throughput; use the
+  // throughput fields instead, which are intended to be more accurate.
+
+  // Sum of digest sizes for all cache download requests.
   int64 total_download_size_bytes = 7;
+
+  // Sum of digest sizes for all cache upload requests.
   int64 total_upload_size_bytes = 8;
+
+  // Sum of payload sizes (compressed, if applicable) for all cache download
+  // requests.
+  int64 total_download_transferred_size_bytes = 14;
+
+  // Sum of payload sizes (compressed, if applicable) for all cache upload
+  // requests.
+  int64 total_upload_transferred_size_bytes = 15;
+
   int64 total_download_usec = 9;
   int64 total_upload_usec = 10;
 
@@ -28,15 +49,247 @@ message CacheStats {
   // The approximate time savings of a build based on
   // the sum of execution time of cached objects.
   int64 total_cached_action_exec_usec = 11;
+
+  // The sum of execution time of actions that missed the action cache.
+  int64 total_uncached_action_exec_usec = 16;
+}
+
+// Request to retrieve detailed per-request cache stats.
+message GetCacheScoreCardRequest {
+  context.RequestContext request_context = 1;
+
+  // The invocation ID for which to look up cache stats.
+  string invocation_id = 2;
+
+  // A page token returned from the previous response, or an empty string
+  // initially.
+  string page_token = 3;
+
+  message Filter {
+    // Field mask selecting filters to be applied.
+    google.protobuf.FieldMask mask = 1;
+
+    // Return only results with this request type.
+    RequestType request_type = 3;
+
+    // Return only results matching this response type.
+    ResponseType response_type = 4;
+
+    // Return only results whose target ID, action mnemonic, action ID, or
+    // digest hash contain this string.
+    string search = 5;
+
+    // Return only results with this cache type.
+    resource.CacheType cache_type = 6;
+
+    // If true, the search string must match exactly and won't match prefix
+    // strings.
+    bool exact_match = 7;
+  }
+
+  // Optional filter for returned results.
+  Filter filter = 4;
+
+  enum OrderBy {
+    UNKNOWN_ORDER_BY = 0;
+    // Order by start time.
+    ORDER_BY_START_TIME = 1;
+    // Order by request duration.
+    ORDER_BY_DURATION = 2;
+    // Order by digest size.
+    ORDER_BY_SIZE = 3;
+  }
+
+  // OrderBy specifies how to sort results.
+  OrderBy order_by = 5;
+
+  // Whether to sort in descending order.
+  bool descending = 6;
+
+  reserved 7;
+
+  enum GroupBy {
+    UNKNOWN_GROUP_BY = 0;
+    // Group by action ID.
+    GROUP_BY_ACTION = 1;
+    // Group by target ID.
+    GROUP_BY_TARGET = 2;
+  }
+
+  // Specifies how to group results. When grouping, the results are grouped by
+  // first sorting all results by the OrderBy field, then effectively re-sorting
+  // so that results with the same group key are in contiguous runs, while still
+  // preserving the original relative ordering within groups.
+  //
+  // For example, when grouping by action and sorting by start time in ascending
+  // order, the results will be grouped into contiguous runs of results where
+  // each run is sorted in ascending order of start time, and the runs
+  // themselves are ordered by earliest result start time.
+  GroupBy group_by = 8;
+}
+
+message GetCacheScoreCardResponse {
+  context.ResponseContext response_context = 1;
+
+  // The cache results for the current page.
+  repeated ScoreCard.Result results = 2;
+
+  // An opaque token that can be included in a subsequent request to fetch more
+  // results from the server. If empty, there are no more results available.
+  string next_page_token = 3;
+}
+
+// RequestType represents the type of cache request being performed: read or
+// write.
+enum RequestType {
+  UNKNOWN_REQUEST_TYPE = 0;
+  // Cache read.
+  READ = 1;
+  // Cache write.
+  WRITE = 2;
+}
+
+// ResponseType represents a category of RPC response codes. This is used
+// instead of raw response codes since NOT_FOUND errors are quite common and it
+// is helpful to have a category representing "all other errors".
+enum ResponseType {
+  UNKNOWN_RESPONSE_TYPE = 0;
+  // Show only successful responses (OK response).
+  OK = 1;
+  // Show only NOT_FOUND responses.
+  NOT_FOUND = 2;
+  // Show only error responses (excluding NOT_FOUND).
+  ERROR = 3;
 }
 
 message ScoreCard {
+  // Result holds details about the result of a single cache request.
   message Result {
+    // The short action name of the action relevant to the transfer,
+    // such as "GoCompile".
     string action_mnemonic = 1;
+
+    // The Bazel target label relevant to the transfer, such as "//foo:bar".
     string target_id = 2;
+
+    // Action digest hash for the action relevant to the transfer.
     string action_id = 3;
+
+    // The type of cache request described by this result (read or write).
+    RequestType request_type = 5;
+
+    // Response status of the cache request. For example, a cache miss is
+    // represented by a READ request_type with a NotFound status code.
+    google.rpc.Status status = 6;
+
+    // The digest of the requested contents.
+    build.bazel.remote.execution.v2.Digest digest = 7;
+
+    // The timestamp at which the server received the request from the client.
+    google.protobuf.Timestamp start_time = 8;
+
+    // The time needed for the transfer to complete, starting from start_time.
+    google.protobuf.Duration duration = 9;
+
+    // Compression type used for the transfer.
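(Aside: the `GroupBy` semantics above amount to a stable re-sort keyed on each group key's first appearance in the `OrderBy` ordering. A Go sketch with made-up data, assuming nothing beyond the comment's description:)

```go
package main

import (
	"fmt"
	"sort"
)

type result struct {
	actionID  string
	startUsec int64
}

func main() {
	// Already sorted by the OrderBy field (start time, ascending).
	results := []result{{"b", 1}, {"a", 2}, {"b", 3}, {"a", 4}}

	// Rank each group by the index of its first element, then stable-sort:
	// runs stay internally ordered, and run order follows earliest result.
	firstSeen := map[string]int{}
	for i, r := range results {
		if _, ok := firstSeen[r.actionID]; !ok {
			firstSeen[r.actionID] = i
		}
	}
	sort.SliceStable(results, func(i, j int) bool {
		return firstSeen[results[i].actionID] < firstSeen[results[j].actionID]
	})
	fmt.Println(results) // [{b 1} {b 3} {a 2} {a 4}]
}
```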
+    build.bazel.remote.execution.v2.Compressor.Value compressor = 10;
+
+    // The number of bytes transferred. If the request was successful, then this
+    // will be the size of the compressed payload (possibly the IDENTITY
+    // compressor, in which case this will just be the digest size). If the
+    // request was unsuccessful, this represents the number of bytes
+    // transferred before the failure, if any.
+    int64 transferred_size_bytes = 11;
+
+    // The file name of the cache artifact, if known.
+    // Ex: "server/util/url/url.a"
+    string name = 12;
+
+    // The file's path prefix, if known. This is part of the full file path, so
+    // it is important, but it can be "noisy" for display purposes, so it is
+    // kept separate.
+    // Ex: "bazel-out/k8-fastbuild/bin"
+    string path_prefix = 13;
+
+    // The type of cache relevant to this result.
+    resource.CacheType cache_type = 14;
+
+    // The details of the execution that originally produced this result.
+
+    // When the worker started executing the action command.
+    google.protobuf.Timestamp execution_start_timestamp = 15;
+    // When the worker completed executing the action command.
+    google.protobuf.Timestamp execution_completed_timestamp = 16;
   }
 
   // In the interest of saving space, we only show cache misses.
+  // TODO(bduffany): use a flat `results` list and deprecate this field.
   repeated Result misses = 1;
+
+  repeated Result results = 2;
+}
+
+// Fetches metadata about a cache resource.
+message GetCacheMetadataRequest {
+  context.RequestContext request_context = 1;
+
+  // The name of the resource we wish to fetch metadata for.
+  resource.ResourceName resource_name = 2;
+}
+
+message GetCacheMetadataResponse {
+  context.ResponseContext response_context = 1;
+  int64 stored_size_bytes = 2;
+  int64 last_access_usec = 3;
+  int64 last_modify_usec = 4;
+  int64 digest_size_bytes = 5;
+}
+
+// Used to cache GetTree responses.
+message DirectoryWithDigest {
+  reserved 2;
+
+  build.bazel.remote.execution.v2.Directory directory = 1;
+  resource.ResourceName resource_name = 3;
+}
+
+message TreeCache {
+  repeated DirectoryWithDigest children = 1;
+}
+
+// Fetch the cumulative sizes of all of the directories beneath the specified
+// root. If the cache doesn't hold the full file hierarchy for any subtree,
+// all parents of the subtree will *not* be calculated, since we can't know
+// the "true" size of the tree.
+message GetTreeDirectorySizesRequest {
+  context.RequestContext request_context = 1;
+
+  build.bazel.remote.execution.v2.Digest root_digest = 2;
+
+  string instance_name = 3;
+
+  build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 4;
+}
+
+message DigestWithTotalSize {
+  // The stringified digest, in the form "hash/bytes".
+  string digest = 1;
+
+  // The total size of the specified object in the CAS, *including* the
+  // recursively expanded subcontents of child directories if this is a
+  // Directory.
+  int64 total_size = 2;
+
+  // If this is a directory, the total number of files and directories
+  // contained within the directory. The directory itself does not count
+  // (i.e., an empty directory has child_count == 0).
+  int64 child_count = 3;
+}
+
+message GetTreeDirectorySizesResponse {
+  context.ResponseContext response_context = 1;
+
+  // A list mapping each digest string (hash/bytes) to the *total* cumulative
+  // size of the directory (i.e., a different, potentially larger number of bytes).
+  repeated DigestWithTotalSize sizes = 2;
+}
diff --git a/third_party/buildbuddy/proto/context.proto b/third_party/buildbuddy/proto/context.proto
index f740b54e4..a3b00565d 100644
--- a/third_party/buildbuddy/proto/context.proto
+++ b/third_party/buildbuddy/proto/context.proto
@@ -4,7 +4,8 @@ import "third_party/buildbuddy/proto/user_id.proto";
 
 package context;
 
-// Next ID: 5
+// Next ID: 6
+
 message RequestContext {
   user_id.UserId user_id = 1;
 
@@ -33,6 +34,10 @@ message RequestContext {
   // See also
   // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/getTimezoneOffset
   int32 timezone_offset_minutes = 3;
+
+  // The timezone from the user's browser (if supported). If empty, we will
+  // fall back to timezone_offset_minutes. If that is unset, we default to UTC.
+  string timezone = 5;
 }
 
 message ResponseContext {
diff --git a/third_party/buildbuddy/proto/invocation.proto b/third_party/buildbuddy/proto/invocation.proto
index 4a1f3eb8a..b8e01dc26 100644
--- a/third_party/buildbuddy/proto/invocation.proto
+++ b/third_party/buildbuddy/proto/invocation.proto
@@ -1,20 +1,44 @@
 syntax = "proto3";
 
 import "third_party/buildbuddy/proto/acl.proto";
+import "third_party/buildbuddy/proto/api_key.proto";
 import "third_party/bazel/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto";
 import "third_party/buildbuddy/proto/cache.proto";
 import "third_party/bazel/src/main/protobuf/command_line.proto";
 import "third_party/buildbuddy/proto/context.proto";
+import "third_party/buildbuddy/proto/invocation_status.proto";
+import "third_party/buildbuddy/proto/stat_filter.proto";
+import "third_party/buildbuddy/proto/target.proto";
 import "google/protobuf/timestamp.proto";
+import "google/protobuf/duration.proto";
 
 package invocation;
 
-// Next tag: 25
+// User-specified option for downloading outputs from the remote cache.
+enum DownloadOutputsOption {
+  UNKNOWN_DOWNLOAD_OUTPUTS_OPTION = 0;
+
+  // --remote_cache is not set.
+  NONE = 1;
+  // --remote_cache is set and --remote_download_outputs is not set to
+  // "toplevel" or "minimal".
+  ALL = 2;
+  // --remote_download_outputs is set to "toplevel".
+  TOP_LEVEL = 3;
+  // --remote_download_outputs is set to "minimal".
+  MINIMAL = 4;
+}
+
+// Next tag: 35
 message Invocation {
   // The invocation identifier itself.
   string invocation_id = 1;
 
   // The ordered set of build events generated by this invocation.
+  // Progress events and Target-related events are not included:
+  // (NamedSetOfFiles, TargetComplete, TargetConfigured, TargetSummary,
+  // TestResult, TestSummary). For those events, use GetEventLogChunkRequest
+  // and GetTargetRequest.
   repeated InvocationEvent event = 2;
 
   // Whether or not the build was successful.
@@ -38,13 +62,7 @@ message Invocation {
   // The number of actions performed.
   int64 action_count = 9;
 
-  enum InvocationStatus {
-    UNKNOWN_INVOCATION_STATUS = 0;
-    COMPLETE_INVOCATION_STATUS = 1;
-    PARTIAL_INVOCATION_STATUS = 2;
-    DISCONNECTED_INVOCATION_STATUS = 3;
-  }
-  InvocationStatus invocation_status = 10;
+  invocation_status.InvocationStatus invocation_status = 10;
 
   // The console buffer extracted from the build events in this invocation.
  // NB: This buffer may be incomplete if invocation_status is not equal to
@@ -93,6 +111,39 @@ message Invocation {
   // The name of the git branch for this invocation, if any
   string branch_name = 23;
+
+  // The number of times this invocation has been attempted.
+  uint64 attempt = 25;
+
+  // The exit code of the bazel command.
+  string bazel_exit_code = 26;
+
+  // The capabilities of the user who created the invocation.
+  repeated api_key.ApiKey.Capability created_with_capabilities = 27;
+
+  // The user's setting of how to download outputs from remote cache.
+  DownloadOutputsOption download_outputs_option = 28;
+
+  // The user's setting of whether to upload local results to remote cache.
+  bool upload_local_results_enabled = 29;
+
+  // The user's setting of whether remote execution is enabled.
+  bool remote_execution_enabled = 30;
+  message Tag {
+    string name = 1;
+  }
+  repeated Tag tags = 31;
+
+  // Target groups, containing initial data pages for target listings. The
+  // target group with status unspecified will have artifacts expanded.
+  repeated target.TargetGroup target_groups = 32;
+
+  // Whether the pattern expanded event was truncated due to the total pattern
+  // length being too large.
+  bool patterns_truncated = 33;
+
+  // Number of TargetConfigured events seen in the invocation.
+  int64 target_configured_count = 34;
 }
 
 message InvocationEvent {
@@ -142,6 +193,11 @@ message GetInvocationOwnerResponse {
   // Group ID that owns the invocation.
   string group_id = 2;
+
+  // Default URL from the group. If custom subdomains are enabled, this will
+  // reference the group subdomain; otherwise it will point to the default
+  // BuildBuddy URL.
+  string group_url = 3;
 }
 
 message UpdateInvocationRequest {
@@ -169,6 +225,17 @@ message DeleteInvocationResponse {
   context.ResponseContext response_context = 1;
 }
 
+message CancelExecutionsRequest {
+  context.RequestContext request_context = 1;
+
+  // The ID of the invocation to be canceled.
+  string invocation_id = 2;
+}
+
+message CancelExecutionsResponse {
+  context.ResponseContext response_context = 1;
+}
+
 message InvocationQuery {
   // The search parameters in this query will be ANDed when performing a
   // search -- so if a client specifies both "user" and "host", all results
   // returned must match both fields.
@@ -202,28 +269,28 @@ message InvocationQuery {
 
   // Status of the build. If multiple are specified, they are combined with
   // "OR".
-  repeated OverallStatus status = 9;
+  repeated invocation_status.OverallStatus status = 9;
 
   // The git branch used for the build.
   string branch_name = 10;
-}
 
-// OverallStatus is a status representing both the completion status and
-// success status of an invocation.
-enum OverallStatus {
-  UNKNOWN_OVERALL_STATUS = 0;
+  // The bazel command that was used. Ex: "build", "test", "run"
+  string command = 11;
+
+  // The minimum invocation duration.
+  google.protobuf.Duration minimum_duration = 12;
 
-  // Status representing a completed, successful invocation.
-  SUCCESS = 1;
+  // The maximum invocation duration.
+  google.protobuf.Duration maximum_duration = 13;
 
-  // Status representing a completed, unsuccessful invocation.
-  FAILURE = 2;
+  // Stat filters (duration_usec, cas_cache_misses, etc.)
+  repeated stat_filter.StatFilter filter = 14;
 
-  // Status representing a partial invocation.
-  IN_PROGRESS = 3;
+  // The pattern for the targets built (exact match). Ex: "//..."
+  string pattern = 15;
 
-  // Status representing a disconnected invocation.
-  DISCONNECTED = 4;
+  // Plaintext tags for the targets built (exact match).
Ex: "my-cool-tag" + repeated string tags = 16; } message InvocationSort { @@ -231,6 +298,12 @@ message InvocationSort { UNKNOWN_SORT_FIELD = 0; CREATED_AT_USEC_SORT_FIELD = 1; UPDATED_AT_USEC_SORT_FIELD = 2; + DURATION_SORT_FIELD = 3; + ACTION_CACHE_HIT_RATIO_SORT_FIELD = 4; + CONTENT_ADDRESSABLE_STORE_CACHE_HIT_RATIO_SORT_FIELD = 5; + CACHE_DOWNLOADED_SORT_FIELD = 6; + CACHE_UPLOADED_SORT_FIELD = 7; + CACHE_TRANSFERRED_SORT_FIELD = 8; } // The field to sort results by. @@ -281,6 +354,7 @@ enum AggType { COMMIT_SHA_AGGREGATION_TYPE = 5; DATE_AGGREGATION_TYPE = 6; BRANCH_AGGREGATION_TYPE = 7; + PATTERN_AGGREGATION_TYPE = 8; } message InvocationStat { @@ -341,10 +415,22 @@ message InvocationStatQuery { // Status of the build. If multiple are specified, they are combined with // "OR". - repeated OverallStatus status = 9; + repeated invocation_status.OverallStatus status = 9; // The git branch used for the build. string branch_name = 10; + + // The bazel command that was used. Ex: "build", "test", "run" + string command = 11; + + // Stat filters (duration_usec, cas_cache_misses, etc.) + repeated stat_filter.StatFilter filter = 12; + + // The pattern for the targets built (exact match). Ex: "//..." + string pattern = 13; + + // Plaintext tags for the targets built (exact match). Ex: "my-cool-tag" + repeated string tags = 14; } message GetInvocationStatRequest { @@ -367,104 +453,3 @@ message GetInvocationStatResponse { // The list of invocation stats found. repeated InvocationStat invocation_stat = 2; } - -message TrendStat { - string name = 1; - - // The sum of all invocation durations for this entity. - int64 total_build_time_usec = 2; - - // The total number of invocations completed by this entity. - int64 total_num_builds = 3; - - // The number of invocations with a duration longer than 0 seconds. - int64 completed_invocation_count = 4; - - // The number of unique users who stared a build. - int64 user_count = 5; - - // The number of unique commits that caused a build. - int64 commit_count = 6; - - // The number of unique hosts that ran a build. - int64 host_count = 7; - - // The number of unique repos that were built. - int64 repo_count = 8; - - // The duration (in microseconds) of the longest build. - int64 max_duration_usec = 9; - - // Server-side Action-cache stats. - int64 action_cache_hits = 10; - int64 action_cache_misses = 11; - int64 action_cache_uploads = 12; - - // Server-side CAS-cache stats. - int64 cas_cache_hits = 13; - int64 cas_cache_misses = 14; - int64 cas_cache_uploads = 15; - - // Download / upload stats. - int64 total_download_size_bytes = 16; - int64 total_upload_size_bytes = 17; - int64 total_download_usec = 18; - int64 total_upload_usec = 19; - - // The number of unique branches that were built. - int64 branch_count = 20; -} - -message TrendQuery { - // The search parameters in this query will be ANDed when performing a - // query -- so if a client specifies both "user" and "host", all results - // returned must match both fields. - - // The unix-user who performed the build. - string user = 1; - - // The host this build was executed on. - string host = 2; - - // The git repo the build was for. - string repo_url = 4; - - // The commit sha used for the build. - string commit_sha = 5; - - // The role played by the build. Ex: "CI". If multiple filters are specified, - // they are combined with "OR". - repeated string role = 6; - - // The timestamp on or after which the build was last updated (inclusive). 
- google.protobuf.Timestamp updated_after = 7; - - // The timestamp up to which the build was last updated (exclusive). - google.protobuf.Timestamp updated_before = 8; - - // Status of the build. If multiple are specified, they are combined with - // "OR". - repeated OverallStatus status = 9; - - // The git branch used for the build. - string branch_name = 10; -} - -message GetTrendRequest { - context.RequestContext request_context = 1; - - TrendQuery query = 2; - - // The maximum number of past days to aggregate. If not set, the server will - // pick an appropriate value. Probably 7. - // DEPRECATED: Use `query.updated_after` and `query.updated_before` instead. - // TODO(bduffany): Delete this once clients no longer use it. - int32 lookback_window_days = 3 [deprecated = true]; -} - -message GetTrendResponse { - context.ResponseContext response_context = 1; - - // The list of trend stats found. - repeated TrendStat trend_stat = 2; -} diff --git a/third_party/buildbuddy/proto/invocation_status.proto b/third_party/buildbuddy/proto/invocation_status.proto new file mode 100644 index 000000000..c7f4bd852 --- /dev/null +++ b/third_party/buildbuddy/proto/invocation_status.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package invocation_status; + +// OverallStatus is a status representing both the completion status and +// success status of an invocation. +enum OverallStatus { + UNKNOWN_OVERALL_STATUS = 0; + + // Status representing a completed, successful invocation. + SUCCESS = 1; + + // Status representing a completed, unsuccessful invocation. + FAILURE = 2; + + // Status representing a partial invocation. + IN_PROGRESS = 3; + + // Status representing a disconnected invocation. + DISCONNECTED = 4; +} + +enum InvocationStatus { + UNKNOWN_INVOCATION_STATUS = 0; + COMPLETE_INVOCATION_STATUS = 1; + PARTIAL_INVOCATION_STATUS = 2; + DISCONNECTED_INVOCATION_STATUS = 3; +} diff --git a/third_party/buildbuddy/proto/remote_execution.proto b/third_party/buildbuddy/proto/remote_execution.proto new file mode 100644 index 000000000..946d5e478 --- /dev/null +++ b/third_party/buildbuddy/proto/remote_execution.proto @@ -0,0 +1,2319 @@ +// Copyright 2018 The Bazel Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Prevent buildfix.sh from clang-formatting this file because it's misformatted +// in the upstream repository. 
+// clang-format off + +syntax = "proto3"; + +package build.bazel.remote.execution.v2; + +import "third_party/buildbuddy/proto/semver.proto"; +import "google/api/annotations.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; +import "google/rpc/status.proto"; +import "third_party/buildbuddy/proto/scheduler.proto"; + +option csharp_namespace = "Build.Bazel.Remote.Execution.V2"; +option go_package = "proto"; +option java_multiple_files = true; +option java_outer_classname = "RemoteExecutionProto"; +option java_package = "build.bazel.remote.execution.v2"; +option objc_class_prefix = "REX"; + + +// The Remote Execution API is used to execute an +// [Action][build.bazel.remote.execution.v2.Action] on the remote +// workers. +// +// As with other services in the Remote Execution API, any call may return an +// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing +// information about when the client should retry the request; clients SHOULD +// respect the information provided. +service Execution { + // Execute an action remotely. + // + // In order to execute an action, the client must first upload all of the + // inputs, the + // [Command][build.bazel.remote.execution.v2.Command] to run, and the + // [Action][build.bazel.remote.execution.v2.Action] into the + // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. + // It then calls `Execute` with an `action_digest` referring to them. The + // server will run the action and eventually return the result. + // + // The input `Action`'s fields MUST meet the various canonicalization + // requirements specified in the documentation for their types so that it has + // the same digest as other logically equivalent `Action`s. The server MAY + // enforce the requirements and return errors if a non-canonical input is + // received. It MAY also proceed without verifying some or all of the + // requirements, such as for performance reasons. If the server does not + // verify the requirement, then it will treat the `Action` as distinct from + // another logically equivalent action if they hash differently. + // + // Returns a stream of + // [google.longrunning.Operation][google.longrunning.Operation] messages + // describing the resulting execution, with eventual `response` + // [ExecuteResponse][build.bazel.remote.execution.v2.ExecuteResponse]. The + // `metadata` on the operation is of type + // [ExecuteOperationMetadata][build.bazel.remote.execution.v2.ExecuteOperationMetadata]. + // + // If the client remains connected after the first response is returned after + // the server, then updates are streamed as if the client had called + // [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution] + // until the execution completes or the request reaches an error. The + // operation can also be queried using [Operations + // API][google.longrunning.Operations.GetOperation]. + // + // The server NEED NOT implement other methods or functionality of the + // Operations API. + // + // Errors discovered during creation of the `Operation` will be reported + // as gRPC Status errors, while errors that occurred while running the + // action will be reported in the `status` field of the `ExecuteResponse`. The + // server MUST NOT set the `error` field of the `Operation` proto. 
+ // The possible errors include: + // + // * `INVALID_ARGUMENT`: One or more arguments are invalid. + // * `FAILED_PRECONDITION`: One or more errors occurred in setting up the + // action requested, such as a missing input or command or no worker being + // available. The client may be able to fix the errors and retry. + // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run + // the action. + // * `UNAVAILABLE`: Due to a transient condition, such as all workers being + // occupied (and the server does not support a queue), the action could not + // be started. The client should retry. + // * `INTERNAL`: An internal error occurred in the execution engine or the + // worker. + // * `DEADLINE_EXCEEDED`: The execution timed out. + // * `CANCELLED`: The operation was cancelled by the client. This status is + // only possible if the server implements the Operations API CancelOperation + // method, and it was called for the current execution. + // + // In the case of a missing input or command, the server SHOULD additionally + // send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail + // where, for each requested blob not present in the CAS, there is a + // `Violation` with a `type` of `MISSING` and a `subject` of + // `"blobs/{digest_function/}{hash}/{size}"` indicating the digest of the + // missing blob. The `subject` is formatted the same way as the + // `resource_name` provided to + // [ByteStream.Read][google.bytestream.ByteStream.Read], with the leading + // instance name omitted. `digest_function` MUST thus be omitted if its value + // is one of MD5, MURMUR3, SHA1, SHA256, SHA384, SHA512, or VSO. + // + // The server does not need to guarantee that a call to this method leads to + // at most one execution of the action. The server MAY execute the action + // multiple times, potentially in parallel. These redundant executions MAY + // continue to run, even if the operation is completed. + rpc Execute(ExecuteRequest) returns (stream google.longrunning.Operation) { + option (google.api.http) = { post: "/v2/{instance_name=**}/actions:execute" body: "*" }; + } + + // Wait for an execution operation to complete. When the client initially + // makes the request, the server immediately responds with the current status + // of the execution. The server will leave the request stream open until the + // operation completes, and then respond with the completed operation. The + // server MAY choose to stream additional updates as execution progresses, + // such as to provide an update as to the state of the execution. + rpc WaitExecution(WaitExecutionRequest) returns (stream google.longrunning.Operation) { + option (google.api.http) = { post: "/v2/{name=operations/**}:waitExecution" body: "*" }; + } + + // HACK(tylerw): Publish a stream of operation updates. + rpc PublishOperation(stream google.longrunning.Operation) + returns (PublishOperationResponse) { + option (google.api.http) = { + post: "/v2/{name=operations/**}:publishOperation" + body: "*" + }; + } +} + +message PublishOperationResponse {} + +// The action cache API is used to query whether a given action has already been +// performed and, if so, retrieve its result. 
Unlike the +// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage], +// which addresses blobs by their own content, the action cache addresses the +// [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a +// digest of the encoded [Action][build.bazel.remote.execution.v2.Action] +// which produced them. +// +// The lifetime of entries in the action cache is implementation-specific, but +// the server SHOULD assume that more recently used entries are more likely to +// be used again. +// +// As with other services in the Remote Execution API, any call may return an +// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing +// information about when the client should retry the request; clients SHOULD +// respect the information provided. +service ActionCache { + // Retrieve a cached execution result. + // + // Implementations SHOULD ensure that any blobs referenced from the + // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage] + // are available at the time of returning the + // [ActionResult][build.bazel.remote.execution.v2.ActionResult] and will be + // for some period of time afterwards. The lifetimes of the referenced blobs SHOULD be increased + // if necessary and applicable. + // + // Errors: + // + // * `NOT_FOUND`: The requested `ActionResult` is not in the cache. + rpc GetActionResult(GetActionResultRequest) returns (ActionResult) { + option (google.api.http) = { get: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" }; + } + + // Upload a new execution result. + // + // In order to allow the server to perform access control based on the type of + // action, and to assist with client debugging, the client MUST first upload + // the [Action][build.bazel.remote.execution.v2.Execution] that produced the + // result, along with its + // [Command][build.bazel.remote.execution.v2.Command], into the + // `ContentAddressableStorage`. + // + // Server implementations MAY modify the + // `UpdateActionResultRequest.action_result` and return an equivalent value. + // + // Errors: + // + // * `INVALID_ARGUMENT`: One or more arguments are invalid. + // * `FAILED_PRECONDITION`: One or more errors occurred in updating the + // action result, such as a missing command or action. + // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the + // entry to the cache. + rpc UpdateActionResult(UpdateActionResultRequest) returns (ActionResult) { + option (google.api.http) = { put: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" body: "action_result" }; + } +} + +// The CAS (content-addressable storage) is used to store the inputs to and +// outputs from the execution service. Each piece of content is addressed by the +// digest of its binary data. +// +// Most of the binary data stored in the CAS is opaque to the execution engine, +// and is only used as a communication medium. In order to build an +// [Action][build.bazel.remote.execution.v2.Action], +// however, the client will need to also upload the +// [Command][build.bazel.remote.execution.v2.Command] and input root +// [Directory][build.bazel.remote.execution.v2.Directory] for the Action. +// The Command and Directory messages must be marshalled to wire format and then +// uploaded under the hash as with any other piece of content. 
In practice, the +// input root directory is likely to refer to other Directories in its +// hierarchy, which must also each be uploaded on their own. +// +// For small file uploads the client should group them together and call +// [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]. +// +// For large uploads, the client must use the +// [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. +// +// For uncompressed data, The `WriteRequest.resource_name` is of the following form: +// `{instance_name}/uploads/{uuid}/blobs/{digest_function/}{hash}/{size}{/optional_metadata}` +// +// Where: +// * `instance_name` is an identifier, possibly containing multiple path +// segments, used to distinguish between the various instances on the server, +// in a manner defined by the server. If it is the empty path, the leading +// slash is omitted, so that the `resource_name` becomes +// `uploads/{uuid}/blobs/{digest_function/}{hash}/{size}{/optional_metadata}`. +// To simplify parsing, a path segment cannot equal any of the following +// keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations`, +// `capabilities` or `compressed-blobs`. +// * `uuid` is a version 4 UUID generated by the client, used to avoid +// collisions between concurrent uploads of the same data. Clients MAY +// reuse the same `uuid` for uploading different blobs. +// * `digest_function` is a lowercase string form of a `DigestFunction.Value` +// enum, indicating which digest function was used to compute `hash`. If the +// digest function used is one of MD5, MURMUR3, SHA1, SHA256, SHA384, SHA512, +// or VSO, this component MUST be omitted. In that case the server SHOULD +// infer the digest function using the length of the `hash` and the digest +// functions announced in the server's capabilities. +// * `hash` and `size` refer to the [Digest][build.bazel.remote.execution.v2.Digest] +// of the data being uploaded. +// * `optional_metadata` is implementation specific data, which clients MAY omit. +// Servers MAY ignore this metadata. +// +// Data can alternatively be uploaded in compressed form, with the following +// `WriteRequest.resource_name` form: +// `{instance_name}/uploads/{uuid}/compressed-blobs/{compressor}/{digest_function/}{uncompressed_hash}/{uncompressed_size}{/optional_metadata}` +// +// Where: +// * `instance_name`, `uuid`, `digest_function` and `optional_metadata` are +// defined as above. +// * `compressor` is a lowercase string form of a `Compressor.Value` enum +// other than `identity`, which is supported by the server and advertised in +// [CacheCapabilities.supported_compressor][build.bazel.remote.execution.v2.CacheCapabilities.supported_compressor]. +// * `uncompressed_hash` and `uncompressed_size` refer to the +// [Digest][build.bazel.remote.execution.v2.Digest] of the data being +// uploaded, once uncompressed. Servers MUST verify that these match +// the uploaded data once uncompressed, and MUST return an +// `INVALID_ARGUMENT` error in the case of mismatch. +// +// Note that when writing compressed blobs, the `WriteRequest.write_offset` in +// the initial request in a stream refers to the offset in the uncompressed form +// of the blob. In subsequent requests, `WriteRequest.write_offset` MUST be the +// sum of the first request's 'WriteRequest.write_offset' and the total size of +// all the compressed data bundles in the previous requests. 
+// Note that this mixes an uncompressed offset with a compressed byte length, +// which is nonsensical, but it is done to fit the semantics of the existing +// ByteStream protocol. +// +// Uploads of the same data MAY occur concurrently in any form, compressed or +// uncompressed. +// +// Clients SHOULD NOT use gRPC-level compression for ByteStream API `Write` +// calls of compressed blobs, since this would compress already-compressed data. +// +// When attempting an upload, if another client has already completed the upload +// (which may occur in the middle of a single upload if another client uploads +// the same blob concurrently), the request will terminate immediately without +// error, and with a response whose `committed_size` is the value `-1` if this +// is a compressed upload, or with the full size of the uploaded file if this is +// an uncompressed upload (regardless of how much data was transmitted by the +// client). If the client completes the upload but the +// [Digest][build.bazel.remote.execution.v2.Digest] does not match, an +// `INVALID_ARGUMENT` error will be returned. In either case, the client should +// not attempt to retry the upload. +// +// Small downloads can be grouped and requested in a batch via +// [BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs]. +// +// For large downloads, the client must use the +// [Read method][google.bytestream.ByteStream.Read] of the ByteStream API. +// +// For uncompressed data, The `ReadRequest.resource_name` is of the following form: +// `{instance_name}/blobs/{digest_function/}{hash}/{size}` +// Where `instance_name`, `digest_function`, `hash` and `size` are defined as +// for uploads. +// +// Data can alternatively be downloaded in compressed form, with the following +// `ReadRequest.resource_name` form: +// `{instance_name}/compressed-blobs/{compressor}/{digest_function/}{uncompressed_hash}/{uncompressed_size}` +// +// Where: +// * `instance_name`, `compressor` and `digest_function` are defined as for +// uploads. +// * `uncompressed_hash` and `uncompressed_size` refer to the +// [Digest][build.bazel.remote.execution.v2.Digest] of the data being +// downloaded, once uncompressed. Clients MUST verify that these match +// the downloaded data once uncompressed, and take appropriate steps in +// the case of failure such as retrying a limited number of times or +// surfacing an error to the user. +// +// When downloading compressed blobs: +// * `ReadRequest.read_offset` refers to the offset in the uncompressed form +// of the blob. +// * Servers MUST return `INVALID_ARGUMENT` if `ReadRequest.read_limit` is +// non-zero. +// * Servers MAY use any compression level they choose, including different +// levels for different blobs (e.g. choosing a level designed for maximum +// speed for data known to be incompressible). +// * Clients SHOULD NOT use gRPC-level compression, since this would compress +// already-compressed data. +// +// Servers MUST be able to provide data for all recently advertised blobs in +// each of the compression formats that the server supports, as well as in +// uncompressed form. +// +// The lifetime of entries in the CAS is implementation specific, but it SHOULD +// be long enough to allow for newly-added and recently looked-up entries to be +// used in subsequent calls (e.g. to +// [Execute][build.bazel.remote.execution.v2.Execution.Execute]). +// +// Servers MUST behave as though empty blobs are always available, even if they +// have not been uploaded. 
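(Aside: a small Go sketch that builds the two ByteStream resource names described above for the same blob: an uncompressed upload and a zstd-compressed download. The instance name and UUID are made-up placeholders:)

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	blob := []byte("some file contents")
	hash := fmt.Sprintf("%x", sha256.Sum256(blob)) // digest of the uncompressed data
	size := len(blob)                              // uncompressed size in bytes

	instance := "my-instance"                      // hypothetical
	uuid := "b7f1dd60-3b3a-4f64-8f21-5d9f2c9b8a11" // hypothetical client-generated v4 UUID

	// Upload, uncompressed. SHA-256 is one of the listed defaults, so the
	// digest_function component is omitted.
	fmt.Printf("%s/uploads/%s/blobs/%s/%d\n", instance, uuid, hash, size)

	// Download, zstd-compressed. The hash and size still refer to the
	// *uncompressed* data.
	fmt.Printf("%s/compressed-blobs/zstd/%s/%d\n", instance, hash, size)
}
```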
Clients MAY optimize away the uploading or +// downloading of empty blobs. +// +// As with other services in the Remote Execution API, any call may return an +// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing +// information about when the client should retry the request; clients SHOULD +// respect the information provided. +service ContentAddressableStorage { + // Determine if blobs are present in the CAS. + // + // Clients can use this API before uploading blobs to determine which ones are + // already present in the CAS and do not need to be uploaded again. + // + // Servers SHOULD increase the lifetimes of the referenced blobs if necessary and + // applicable. + // + // There are no method-specific errors. + rpc FindMissingBlobs(FindMissingBlobsRequest) returns (FindMissingBlobsResponse) { + option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:findMissing" body: "*" }; + } + + // Upload many blobs at once. + // + // The server may enforce a limit of the combined total size of blobs + // to be uploaded using this API. This limit may be obtained using the + // [Capabilities][build.bazel.remote.execution.v2.Capabilities] API. + // Requests exceeding the limit should either be split into smaller + // chunks or uploaded using the + // [ByteStream API][google.bytestream.ByteStream], as appropriate. + // + // This request is equivalent to calling a Bytestream `Write` request + // on each individual blob, in parallel. The requests may succeed or fail + // independently. + // + // Errors: + // + // * `INVALID_ARGUMENT`: The client attempted to upload more than the + // server supported limit. + // + // Individual requests may return the following errors, additionally: + // + // * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob. + // * `INVALID_ARGUMENT`: The + // [Digest][build.bazel.remote.execution.v2.Digest] does not match the + // provided data. + rpc BatchUpdateBlobs(BatchUpdateBlobsRequest) returns (BatchUpdateBlobsResponse) { + option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:batchUpdate" body: "*" }; + } + + // Download many blobs at once. + // + // The server may enforce a limit of the combined total size of blobs + // to be downloaded using this API. This limit may be obtained using the + // [Capabilities][build.bazel.remote.execution.v2.Capabilities] API. + // Requests exceeding the limit should either be split into smaller + // chunks or downloaded using the + // [ByteStream API][google.bytestream.ByteStream], as appropriate. + // + // This request is equivalent to calling a Bytestream `Read` request + // on each individual blob, in parallel. The requests may succeed or fail + // independently. + // + // Errors: + // + // * `INVALID_ARGUMENT`: The client attempted to read more than the + // server supported limit. + // + // Every error on individual read will be returned in the corresponding digest + // status. + rpc BatchReadBlobs(BatchReadBlobsRequest) returns (BatchReadBlobsResponse) { + option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:batchRead" body: "*" }; + } + + // Fetch the entire directory tree rooted at a node. + // + // This request must be targeted at a + // [Directory][build.bazel.remote.execution.v2.Directory] stored in the + // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage] + // (CAS). The server will enumerate the `Directory` tree recursively and + // return every node descended from the root. 
+ // + // The GetTreeRequest.page_token parameter can be used to skip ahead in + // the stream (e.g. when retrying a partially completed and aborted request), + // by setting it to a value taken from GetTreeResponse.next_page_token of the + // last successfully processed GetTreeResponse). + // + // The exact traversal order is unspecified and, unless retrieving subsequent + // pages from an earlier request, is not guaranteed to be stable across + // multiple invocations of `GetTree`. + // + // If part of the tree is missing from the CAS, the server will return the + // portion present and omit the rest. + // + // Errors: + // + // * `NOT_FOUND`: The requested tree root is not present in the CAS. + rpc GetTree(GetTreeRequest) returns (stream GetTreeResponse) { + option (google.api.http) = { get: "/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree" }; + } +} + +// The Capabilities service may be used by remote execution clients to query +// various server properties, in order to self-configure or return meaningful +// error messages. +// +// The query may include a particular `instance_name`, in which case the values +// returned will pertain to that instance. +service Capabilities { + // GetCapabilities returns the server capabilities configuration of the + // remote endpoint. + // Only the capabilities of the services supported by the endpoint will + // be returned: + // * Execution + CAS + Action Cache endpoints should return both + // CacheCapabilities and ExecutionCapabilities. + // * Execution only endpoints should return ExecutionCapabilities. + // * CAS + Action Cache only endpoints should return CacheCapabilities. + // + // There are no method-specific errors. + rpc GetCapabilities(GetCapabilitiesRequest) returns (ServerCapabilities) { + option (google.api.http) = { + get: "/v2/{instance_name=**}/capabilities" + }; + } +} + +// An `Action` captures all the information about an execution which is required +// to reproduce it. +// +// `Action`s are the core component of the [Execution] service. A single +// `Action` represents a repeatable action that can be performed by the +// execution service. `Action`s can be succinctly identified by the digest of +// their wire format encoding and, once an `Action` has been executed, will be +// cached in the action cache. Future requests can then use the cached result +// rather than needing to run afresh. +// +// When a server completes execution of an +// [Action][build.bazel.remote.execution.v2.Action], it MAY choose to +// cache the [result][build.bazel.remote.execution.v2.ActionResult] in +// the [ActionCache][build.bazel.remote.execution.v2.ActionCache] unless +// `do_not_cache` is `true`. Clients SHOULD expect the server to do so. By +// default, future calls to +// [Execute][build.bazel.remote.execution.v2.Execution.Execute] the same +// `Action` will also serve their results from the cache. Clients must take care +// to understand the caching behaviour. Ideally, all `Action`s will be +// reproducible so that serving a result from cache is always desirable and +// correct. +message Action { + // The digest of the [Command][build.bazel.remote.execution.v2.Command] + // to run, which MUST be present in the + // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. + Digest command_digest = 1; + + // The digest of the root + // [Directory][build.bazel.remote.execution.v2.Directory] for the input + // files. 
The files in the directory tree are available in the correct + // location on the build machine before the command is executed. The root + // directory, as well as every subdirectory and content blob referred to, MUST + // be in the + // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. + Digest input_root_digest = 2; + + reserved 3 to 5; // Used for fields moved to [Command][build.bazel.remote.execution.v2.Command]. + + // A timeout after which the execution should be killed. If the timeout is + // absent, then the client is specifying that the execution should continue + // as long as the server will let it. The server SHOULD impose a timeout if + // the client does not specify one, however, if the client does specify a + // timeout that is longer than the server's maximum timeout, the server MUST + // reject the request. + // + // The timeout is only intended to cover the "execution" of the specified + // action and not time in queue nor any overheads before or after execution + // such as marshalling inputs/outputs. The server SHOULD avoid including time + // spent the client doesn't have control over, and MAY extend or reduce the + // timeout to account for delays or speedups that occur during execution + // itself (e.g., lazily loading data from the Content Addressable Storage, + // live migration of virtual machines, emulation overhead). + // + // The timeout is a part of the + // [Action][build.bazel.remote.execution.v2.Action] message, and + // therefore two `Actions` with different timeouts are different, even if they + // are otherwise identical. This is because, if they were not, running an + // `Action` with a lower timeout than is required might result in a cache hit + // from an execution run with a longer timeout, hiding the fact that the + // timeout is too short. By encoding it directly in the `Action`, a lower + // timeout will result in a cache miss and the execution timeout will fail + // immediately, rather than whenever the cache entry gets evicted. + google.protobuf.Duration timeout = 6; + + // If true, then the `Action`'s result cannot be cached, and in-flight + // requests for the same `Action` may not be merged. + bool do_not_cache = 7; + + // BUILDBUDDY-SPECIFIC FIELD BELOW. + // This field was removed (and marked reserved) in the upstream proto, but we + // have non-migrated Actions stored. Rather than do the migration, or support + // both, we'll just keep this field for the time being. + // + // List of required supported + // [NodeProperty][build.bazel.remote.execution.v2.NodeProperty] keys. In order + // to ensure that equivalent `Action`s always hash to the same value, the + // supported node properties MUST be lexicographically sorted by name. Sorting + // of strings is done by code point, equivalently, by the UTF-8 bytes. + // + // The interpretation of these properties is server-dependent. If a property + // is not recognized by the server, the server will return an + // `INVALID_ARGUMENT` error. + repeated string output_node_properties = 8; + + // An optional additional salt value used to place this `Action` into a + // separate cache namespace from other instances having the same field + // contents. This salt typically comes from operational configuration + // specific to sources such as repo and service configuration, + // and allows disowning an entire set of ActionResults that might have been + // poisoned by buggy software or tool failures. 
+ bytes salt = 9; + + // The optional platform requirements for the execution environment. The + // server MAY choose to execute the action on any worker satisfying the + // requirements, so the client SHOULD ensure that running the action on any + // such worker will have the same result. A detailed lexicon for this can be + // found in the accompanying platform.md. + // New in version 2.2: clients SHOULD set these platform properties as well + // as those in the [Command][build.bazel.remote.execution.v2.Command]. Servers + // SHOULD prefer those set here. + Platform platform = 10; +} + +// A `Command` is the actual command executed by a worker running an +// [Action][build.bazel.remote.execution.v2.Action] and specifications of its +// environment. +// +// Except as otherwise required, the environment (such as which system +// libraries or binaries are available, and what filesystems are mounted where) +// is defined by and specific to the implementation of the remote execution API. +message Command { + // An `EnvironmentVariable` is one variable to set in the running program's + // environment. + message EnvironmentVariable { + // The variable name. + string name = 1; + + // The variable value. + string value = 2; + } + + // The arguments to the command. + // + // The first argument specifies the command to run, which may be either an + // absolute path, a path relative to the working directory, or an unqualified + // path (without path separators) which will be resolved using the operating + // system's equivalent of the PATH environment variable. Path separators + // native to the operating system running on the worker SHOULD be used. If the + // `environment_variables` list contains an entry for the PATH environment + // variable, it SHOULD be respected. If not, the resolution process is + // implementation-defined. + // + // Changed in v2.3. v2.2 and older require that no PATH lookups are performed, + // and that relative paths are resolved relative to the input root. This + // behavior can, however, not be relied upon, as most implementations already + // followed the rules described above. + repeated string arguments = 1; + + // The environment variables to set when running the program. The worker may + // provide its own default environment variables; these defaults can be + // overridden using this field. Additional variables can also be specified. + // + // In order to ensure that equivalent + // [Command][build.bazel.remote.execution.v2.Command]s always hash to the same + // value, the environment variables MUST be lexicographically sorted by name. + // Sorting of strings is done by code point, equivalently, by the UTF-8 bytes. + repeated EnvironmentVariable environment_variables = 2; + + // A list of the output files that the client expects to retrieve from the + // action. Only the listed files, as well as directories listed in + // `output_directories`, will be returned to the client as output. + // Other files or directories that may be created during command execution + // are discarded. + // + // The paths are relative to the working directory of the action execution. + // The paths are specified using a single forward slash (`/`) as a path + // separator, even if the execution platform natively uses a different + // separator. The path MUST NOT include a trailing slash, nor a leading slash, + // being a relative path. 
+  //
+  // In order to ensure consistent hashing of the same Action, the output paths
+  // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
+  // bytes).
+  //
+  // An output file cannot be duplicated, be a parent of another output file, or
+  // have the same path as any of the listed output directories.
+  //
+  // Directories leading up to the output files are created by the worker prior
+  // to execution, even if they are not explicitly part of the input root.
+  //
+  // DEPRECATED since v2.1: Use `output_paths` instead.
+  repeated string output_files = 3;
+
+  // A list of the output directories that the client expects to retrieve from
+  // the action. Only the listed directories will be returned (an entire
+  // directory structure will be returned as a
+  // [Tree][build.bazel.remote.execution.v2.Tree] message digest, see
+  // [OutputDirectory][build.bazel.remote.execution.v2.OutputDirectory]), as
+  // well as files listed in `output_files`. Other files or directories that
+  // may be created during command execution are discarded.
+  //
+  // The paths are relative to the working directory of the action execution.
+  // The paths are specified using a single forward slash (`/`) as a path
+  // separator, even if the execution platform natively uses a different
+  // separator. The path MUST NOT include a trailing slash, nor a leading slash,
+  // being a relative path. The special value of empty string is allowed,
+  // although not recommended, and can be used to capture the entire working
+  // directory tree, including inputs.
+  //
+  // In order to ensure consistent hashing of the same Action, the output paths
+  // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
+  // bytes).
+  //
+  // An output directory cannot be duplicated or have the same path as any of
+  // the listed output files. An output directory is allowed to be a parent of
+  // another output directory.
+  //
+  // Directories leading up to the output directories (but not the output
+  // directories themselves) are created by the worker prior to execution, even
+  // if they are not explicitly part of the input root.
+  //
+  // DEPRECATED since v2.1: Use `output_paths` instead.
+  repeated string output_directories = 4;
+
+  // A list of the output paths that the client expects to retrieve from the
+  // action. Only the listed paths will be returned to the client as output.
+  // The type of the output (file or directory) is not specified, and will be
+  // determined by the server after action execution. If the resulting path is
+  // a file, it will be returned in an
+  // [OutputFile][build.bazel.remote.execution.v2.OutputFile] typed field.
+  // If the path is a directory, the entire directory structure will be returned
+  // as a [Tree][build.bazel.remote.execution.v2.Tree] message digest, see
+  // [OutputDirectory][build.bazel.remote.execution.v2.OutputDirectory].
+  // Other files or directories that may be created during command execution
+  // are discarded.
+  //
+  // The paths are relative to the working directory of the action execution.
+  // The paths are specified using a single forward slash (`/`) as a path
+  // separator, even if the execution platform natively uses a different
+  // separator. The path MUST NOT include a trailing slash, nor a leading slash,
+  // being a relative path.
+  //
+  // In order to ensure consistent hashing of the same Action, the output paths
+  // MUST be deduplicated and sorted lexicographically by code point (or,
+  // equivalently, by UTF-8 bytes).
+ // + // Directories leading up to the output paths are created by the worker prior + // to execution, even if they are not explicitly part of the input root. + // + // New in v2.1: this field supersedes the DEPRECATED `output_files` and + // `output_directories` fields. If `output_paths` is used, `output_files` and + // `output_directories` will be ignored! + repeated string output_paths = 7; + + // The platform requirements for the execution environment. The server MAY + // choose to execute the action on any worker satisfying the requirements, so + // the client SHOULD ensure that running the action on any such worker will + // have the same result. A detailed lexicon for this can be found in the + // accompanying platform.md. + // DEPRECATED as of v2.2: platform properties are now specified directly in + // the action. See documentation note in the + // [Action][build.bazel.remote.execution.v2.Action] for migration. + Platform platform = 5; + + // The working directory, relative to the input root, for the command to run + // in. It must be a directory which exists in the input tree. If it is left + // empty, then the action is run in the input root. + string working_directory = 6; + + // A list of keys for node properties the client expects to retrieve for + // output files and directories. Keys are either names of string-based + // [NodeProperty][build.bazel.remote.execution.v2.NodeProperty] or + // names of fields in [NodeProperties][build.bazel.remote.execution.v2.NodeProperties]. + // In order to ensure that equivalent `Action`s always hash to the same + // value, the node properties MUST be lexicographically sorted by name. + // Sorting of strings is done by code point, equivalently, by the UTF-8 bytes. + // + // The interpretation of string-based properties is server-dependent. If a + // property is not recognized by the server, the server will return an + // `INVALID_ARGUMENT`. + repeated string output_node_properties = 8; +} + +// A `Platform` is a set of requirements, such as hardware, operating system, or +// compiler toolchain, for an +// [Action][build.bazel.remote.execution.v2.Action]'s execution +// environment. A `Platform` is represented as a series of key-value pairs +// representing the properties that are required of the platform. +message Platform { + // A single property for the environment. The server is responsible for + // specifying the property `name`s that it accepts. If an unknown `name` is + // provided in the requirements for an + // [Action][build.bazel.remote.execution.v2.Action], the server SHOULD + // reject the execution request. If permitted by the server, the same `name` + // may occur multiple times. + // + // The server is also responsible for specifying the interpretation of + // property `value`s. For instance, a property describing how much RAM must be + // available may be interpreted as allowing a worker with 16GB to fulfill a + // request for 8GB, while a property describing the OS environment on which + // the action must be performed may require an exact match with the worker's + // OS. + // + // The server MAY use the `value` of one or more properties to determine how + // it sets up the execution environment, such as by making specific system + // files available to the worker. + // + // Both names and values are typically case-sensitive. Note that the platform + // is implicitly part of the action digest, so even tiny changes in the names + // or values (like changing case) may result in different action cache + // entries. 
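+  //
+  // As a non-normative example, a `Platform` in canonical form (property
+  // names here are hypothetical; properties sorted by name, then value)
+  // might look like:
+  //
+  // ```json
+  // // (Platform proto)
+  // {
+  //   properties: [
+  //     { name: "ISA", value: "x86-64" },
+  //     { name: "OSFamily", value: "linux" }
+  //   ]
+  // }
+  // ```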
+  message Property {
+    // The property name.
+    string name = 1;
+
+    // The property value.
+    string value = 2;
+  }
+
+  // The properties that make up this platform. In order to ensure that
+  // equivalent `Platform`s always hash to the same value, the properties MUST
+  // be lexicographically sorted by name, and then by value. Sorting of strings
+  // is done by code point, equivalently, by the UTF-8 bytes.
+  repeated Property properties = 1;
+}
+
+// A `Directory` represents a directory node in a file tree, containing zero or
+// more children [FileNodes][build.bazel.remote.execution.v2.FileNode],
+// [DirectoryNodes][build.bazel.remote.execution.v2.DirectoryNode] and
+// [SymlinkNodes][build.bazel.remote.execution.v2.SymlinkNode].
+// Each `Node` contains its name in the directory, either the digest of its
+// content (either a file blob or a `Directory` proto) or a symlink target, as
+// well as possibly some metadata about the file or directory.
+//
+// In order to ensure that two equivalent directory trees hash to the same
+// value, the following restrictions MUST be obeyed when constructing a
+// `Directory`:
+//
+// * Every child in the directory must have a path of exactly one segment.
+//   Multiple levels of directory hierarchy may not be collapsed.
+// * Each child in the directory must have a unique path segment (file name).
+//   Note that while the API itself is case-sensitive, the environment where
+//   the Action is executed may or may not be case-sensitive. That is, it is
+//   legal to call the API with a Directory that has both "Foo" and "foo" as
+//   children, but the Action may be rejected by the remote system upon
+//   execution.
+// * The files, directories and symlinks in the directory must each be sorted
+//   in lexicographical order by path. The path strings must be sorted by code
+//   point, equivalently, by UTF-8 bytes.
+// * The [NodeProperties][build.bazel.remote.execution.v2.NodeProperty] of files,
+//   directories, and symlinks must be sorted in lexicographical order by
+//   property name.
+//
+// A `Directory` that obeys the restrictions is said to be in canonical form.
+//
+// As an example, the following could be used for a file named `bar` and a
+// directory named `foo` with an executable file named `baz` (hashes shortened
+// for readability):
+//
+// ```json
+// // (Directory proto)
+// {
+//   files: [
+//     {
+//       name: "bar",
+//       digest: {
+//         hash: "4a73bc9d03...",
+//         size: 65534
+//       },
+//       node_properties: [
+//         {
+//           "name": "MTime",
+//           "value": "2017-01-15T01:30:15.01Z"
+//         }
+//       ]
+//     }
+//   ],
+//   directories: [
+//     {
+//       name: "foo",
+//       digest: {
+//         hash: "4cf2eda940...",
+//         size: 43
+//       }
+//     }
+//   ]
+// }
+//
+// // (Directory proto with hash "4cf2eda940..." and size 43)
+// {
+//   files: [
+//     {
+//       name: "baz",
+//       digest: {
+//         hash: "b2c941073e...",
+//         size: 1294,
+//       },
+//       is_executable: true
+//     }
+//   ]
+// }
+// ```
+message Directory {
+  // The files in the directory.
+  repeated FileNode files = 1;
+
+  // The subdirectories in the directory.
+  repeated DirectoryNode directories = 2;
+
+  // The symlinks in the directory.
+  repeated SymlinkNode symlinks = 3;
+
+  // The node properties of the Directory.
+  reserved 4;
+  NodeProperties node_properties = 5;
+}
+
+// A single property for [FileNodes][build.bazel.remote.execution.v2.FileNode],
+// [DirectoryNodes][build.bazel.remote.execution.v2.DirectoryNode], and
+// [SymlinkNodes][build.bazel.remote.execution.v2.SymlinkNode].
The server is +// responsible for specifying the property `name`s that it accepts. If +// permitted by the server, the same `name` may occur multiple times. +message NodeProperty { + // The property name. + string name = 1; + + // The property value. + string value = 2; +} + +// Node properties for [FileNodes][build.bazel.remote.execution.v2.FileNode], +// [DirectoryNodes][build.bazel.remote.execution.v2.DirectoryNode], and +// [SymlinkNodes][build.bazel.remote.execution.v2.SymlinkNode]. The server is +// responsible for specifying the properties that it accepts. +// +message NodeProperties { + // A list of string-based + // [NodeProperties][build.bazel.remote.execution.v2.NodeProperty]. + repeated NodeProperty properties = 1; + + // The file's last modification timestamp. + google.protobuf.Timestamp mtime = 2; + + // The UNIX file mode, e.g., 0755. + google.protobuf.UInt32Value unix_mode = 3; +} + +// A `FileNode` represents a single file and associated metadata. +message FileNode { + // The name of the file. + string name = 1; + + // The digest of the file's content. + Digest digest = 2; + + reserved 3; // Reserved to ensure wire-compatibility with `OutputFile`. + + // True if file is executable, false otherwise. + bool is_executable = 4; + + // The node properties of the FileNode. + reserved 5; + NodeProperties node_properties = 6; +} + +// A `DirectoryNode` represents a child of a +// [Directory][build.bazel.remote.execution.v2.Directory] which is itself +// a `Directory` and its associated metadata. +message DirectoryNode { + // The name of the directory. + string name = 1; + + // The digest of the + // [Directory][build.bazel.remote.execution.v2.Directory] object + // represented. See [Digest][build.bazel.remote.execution.v2.Digest] + // for information about how to take the digest of a proto message. + Digest digest = 2; +} + +// A `SymlinkNode` represents a symbolic link. +message SymlinkNode { + // The name of the symlink. + string name = 1; + + // The target path of the symlink. The path separator is a forward slash `/`. + // The target path can be relative to the parent directory of the symlink or + // it can be an absolute path starting with `/`. Support for absolute paths + // can be checked using the [Capabilities][build.bazel.remote.execution.v2.Capabilities] + // API. `..` components are allowed anywhere in the target path as logical + // canonicalization may lead to different behavior in the presence of + // directory symlinks (e.g. `foo/../bar` may not be the same as `bar`). + // To reduce potential cache misses, canonicalization is still recommended + // where this is possible without impacting correctness. + string target = 2; + + // The node properties of the SymlinkNode. + reserved 3; + NodeProperties node_properties = 4; +} + +// A content digest. A digest for a given blob consists of the size of the blob +// and its hash. The hash algorithm to use is defined by the server. +// +// The size is considered to be an integral part of the digest and cannot be +// separated. That is, even if the `hash` field is correctly specified but +// `size_bytes` is not, the server MUST reject the request. +// +// The reason for including the size in the digest is as follows: in a great +// many cases, the server needs to know the size of the blob it is about to work +// with prior to starting an operation with it, such as flattening Merkle tree +// structures or streaming it to a worker. 
+// Technically, the server could
+// implement a separate metadata store, but this results in a significantly more
+// complicated implementation as opposed to having the client specify the size
+// up-front (or storing the size along with the digest in every message where
+// digests are embedded). This does mean that the API leaks some implementation
+// details of (what we consider to be) a reasonable server implementation, but
+// we consider this to be a worthwhile tradeoff.
+//
+// When a `Digest` is used to refer to a proto message, it always refers to the
+// message in binary encoded form. To ensure consistent hashing, clients and
+// servers MUST ensure that they serialize messages according to the following
+// rules, even if there are alternate valid encodings for the same message:
+//
+// * Fields are serialized in tag order.
+// * There are no unknown fields.
+// * There are no duplicate fields.
+// * Fields are serialized according to the default semantics for their type.
+//
+// Most protocol buffer implementations will always follow these rules when
+// serializing, but care should be taken to avoid shortcuts. For instance,
+// concatenating two messages to merge them may produce duplicate fields.
+message Digest {
+  // The hash. In the case of SHA-256, it will always be a lowercase hex string
+  // exactly 64 characters long.
+  string hash = 1;
+
+  // The size of the blob, in bytes.
+  int64 size_bytes = 2;
+}
+
+// ExecutedActionMetadata contains details about a completed execution.
+message ExecutedActionMetadata {
+  // The name of the worker which ran the execution.
+  string worker = 1;
+
+  // When the action was added to the queue.
+  google.protobuf.Timestamp queued_timestamp = 2;
+
+  // When the worker received the action.
+  google.protobuf.Timestamp worker_start_timestamp = 3;
+
+  // When the worker completed the action, including all stages.
+  google.protobuf.Timestamp worker_completed_timestamp = 4;
+
+  // When the worker started fetching action inputs.
+  google.protobuf.Timestamp input_fetch_start_timestamp = 5;
+
+  // When the worker finished fetching action inputs.
+  google.protobuf.Timestamp input_fetch_completed_timestamp = 6;
+
+  // When the worker started executing the action command.
+  google.protobuf.Timestamp execution_start_timestamp = 7;
+
+  // When the worker completed executing the action command.
+  google.protobuf.Timestamp execution_completed_timestamp = 8;
+
+  // New in v2.3: the amount of time the worker spent executing the action
+  // command, potentially computed using a worker-specific virtual clock.
+  //
+  // The virtual execution duration is only intended to cover the "execution" of
+  // the specified action and not time in queue nor any overheads before or
+  // after execution such as marshalling inputs/outputs. The server SHOULD avoid
+  // including time spent the client doesn't have control over, and MAY extend
+  // or reduce the execution duration to account for delays or speedups that
+  // occur during execution itself (e.g., lazily loading data from the Content
+  // Addressable Storage, live migration of virtual machines, emulation
+  // overhead).
+  //
+  // The method of timekeeping used to compute the virtual execution duration
+  // MUST be consistent with what is used to enforce the
+  // [Action][build.bazel.remote.execution.v2.Action]'s `timeout`. There is no
+  // relationship between the virtual execution duration and the values of
+  // `execution_start_timestamp` and `execution_completed_timestamp`.
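+  //
+  // As a non-normative example: a worker whose virtual clock runs at half
+  // speed while it multiplexes several actions might report a virtual
+  // execution duration of 30s even though 60s of wall-clock time elapsed
+  // between `execution_start_timestamp` and `execution_completed_timestamp`.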
+  google.protobuf.Duration virtual_execution_duration = 12;
+
+  // When the worker started uploading action outputs.
+  google.protobuf.Timestamp output_upload_start_timestamp = 9;
+
+  // When the worker finished uploading action outputs.
+  google.protobuf.Timestamp output_upload_completed_timestamp = 10;
+
+  // Details that are specific to the kind of worker used. For example,
+  // on POSIX-like systems this could contain a message with
+  // getrusage(2) statistics.
+  repeated google.protobuf.Any auxiliary_metadata = 11;
+
+  // BUILDBUDDY-SPECIFIC FIELDS BELOW.
+  // Started at field #1000 to avoid conflicts with Bazel.
+
+  // The unique ID of the executor instance that ran this action.
+  string executor_id = 1000;
+
+  // Any io_stats that were collected during this execution.
+  IOStats io_stats = 1001;
+
+  // Any compute stats (CPU/memory) that were collected during this execution.
+  UsageStats usage_stats = 1002;
+
+  // Estimated task size that was used for scheduling purposes.
+  scheduler.TaskSize estimated_task_size = 1003;
+
+  // Whether the executed Action was marked with `do_not_cache`.
+  bool do_not_cache = 1004;
+
+  // Metadata associated with the VM that this action was executed on.
+  VMMetadata vm_metadata = 1005;
+}
+
+// An ActionResult represents the result of an
+// [Action][build.bazel.remote.execution.v2.Action] being run.
+//
+// It is advised that at least one field (for example
+// `ActionResult.execution_metadata.worker`) have a non-default value, to
+// ensure that the serialized value is non-empty, which can then be used
+// as a basic data sanity check.
+message ActionResult {
+  reserved 1; // Reserved for use as the resource name.
+
+  // The output files of the action. For each output file requested in the
+  // `output_files` or `output_paths` field of the Action, if the corresponding
+  // file existed after the action completed, a single entry will be present
+  // either in this field, or the `output_file_symlinks` field if the file was
+  // a symbolic link to another file (`output_symlinks` field after v2.1).
+  //
+  // If an output listed in `output_files` was found, but was a directory rather
+  // than a regular file, the server will return a FAILED_PRECONDITION.
+  // If the action does not produce the requested output, then that output
+  // will be omitted from the list. The server is free to arrange the output
+  // list as desired; clients MUST NOT assume that the output list is sorted.
+  repeated OutputFile output_files = 2;
+
+  // The output files of the action that are symbolic links to other files. Those
+  // may be links to other output files, or input files, or even absolute paths
+  // outside of the working directory, if the server supports
+  // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy].
+  // For each output file requested in the `output_files` or `output_paths`
+  // field of the Action, if the corresponding file existed after
+  // the action completed, a single entry will be present either in this field,
+  // or in the `output_files` field, if the file was not a symbolic link.
+  //
+  // If an output symbolic link of the same name as listed in `output_files` of
+  // the Command was found, but its target type was not a regular file, the
+  // server will return a FAILED_PRECONDITION.
+  // If the action does not produce the requested output, then that output
+  // will be omitted from the list.
+  // The server is free to arrange the output
+  // list as desired; clients MUST NOT assume that the output list is sorted.
+  //
+  // DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API
+  // should still populate this field in addition to `output_symlinks`.
+  repeated OutputSymlink output_file_symlinks = 10;
+
+  // New in v2.1: this field will only be populated if the command
+  // `output_paths` field was used, and not the pre v2.1 `output_files` or
+  // `output_directories` fields.
+  // The output paths of the action that are symbolic links to other paths. Those
+  // may be links to other outputs, or inputs, or even absolute paths
+  // outside of the working directory, if the server supports
+  // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy].
+  // A single entry will be present for each output requested in the
+  // `output_paths` field of the Action, if the corresponding path existed
+  // after the action completed and was a symbolic link.
+  //
+  // If the action does not produce a requested output, then that output
+  // will be omitted from the list. The server is free to arrange the output
+  // list as desired; clients MUST NOT assume that the output list is sorted.
+  repeated OutputSymlink output_symlinks = 12;
+
+  // The output directories of the action. For each output directory requested
+  // in the `output_directories` or `output_paths` field of the Action, if the
+  // corresponding directory existed after the action completed, a single entry
+  // will be present in the output list, which will contain the digest of a
+  // [Tree][build.bazel.remote.execution.v2.Tree] message containing the
+  // directory tree, and the path equal exactly to the corresponding Action
+  // output_directories member.
+  //
+  // As an example, suppose the Action had an output directory `a/b/dir` and the
+  // execution produced the following contents in `a/b/dir`: a file named `bar`
+  // and a directory named `foo` with an executable file named `baz`. Then,
+  // output_directory will contain (hashes shortened for readability):
+  //
+  // ```json
+  // // OutputDirectory proto:
+  // {
+  //   path: "a/b/dir"
+  //   tree_digest: {
+  //     hash: "4a73bc9d03...",
+  //     size: 55
+  //   }
+  // }
+  // // Tree proto with hash "4a73bc9d03..." and size 55:
+  // {
+  //   root: {
+  //     files: [
+  //       {
+  //         name: "bar",
+  //         digest: {
+  //           hash: "4a73bc9d03...",
+  //           size: 65534
+  //         }
+  //       }
+  //     ],
+  //     directories: [
+  //       {
+  //         name: "foo",
+  //         digest: {
+  //           hash: "4cf2eda940...",
+  //           size: 43
+  //         }
+  //       }
+  //     ]
+  //   }
+  //   children: {
+  //     // (Directory proto with hash "4cf2eda940..." and size 43)
+  //     files: [
+  //       {
+  //         name: "baz",
+  //         digest: {
+  //           hash: "b2c941073e...",
+  //           size: 1294,
+  //         },
+  //         is_executable: true
+  //       }
+  //     ]
+  //   }
+  // }
+  // ```
+  // If an output of the same name as listed in `output_files` of
+  // the Command was found in `output_directories`, but was not a directory, the
+  // server will return a FAILED_PRECONDITION.
+  repeated OutputDirectory output_directories = 3;
+
+  // The output directories of the action that are symbolic links to other
+  // directories. Those may be links to other output directories, or input
+  // directories, or even absolute paths outside of the working directory,
+  // if the server supports
+  // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy].
+ // For each output directory requested in the `output_directories` field of + // the Action, if the directory existed after the action completed, a + // single entry will be present either in this field, or in the + // `output_directories` field, if the directory was not a symbolic link. + // + // If an output of the same name was found, but was a symbolic link to a file + // instead of a directory, the server will return a FAILED_PRECONDITION. + // If the action does not produce the requested output, then that output + // will be omitted from the list. The server is free to arrange the output + // list as desired; clients MUST NOT assume that the output list is sorted. + // + // DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API + // should still populate this field in addition to `output_symlinks`. + repeated OutputSymlink output_directory_symlinks = 11; + + // The exit code of the command. + int32 exit_code = 4; + + // The standard output buffer of the action. The server SHOULD NOT inline + // stdout unless requested by the client in the + // [GetActionResultRequest][build.bazel.remote.execution.v2.GetActionResultRequest] + // message. The server MAY omit inlining, even if requested, and MUST do so if inlining + // would cause the response to exceed message size limits. + // Clients SHOULD NOT populate this field when uploading to the cache. + bytes stdout_raw = 5; + + // The digest for a blob containing the standard output of the action, which + // can be retrieved from the + // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. + Digest stdout_digest = 6; + + // The standard error buffer of the action. The server SHOULD NOT inline + // stderr unless requested by the client in the + // [GetActionResultRequest][build.bazel.remote.execution.v2.GetActionResultRequest] + // message. The server MAY omit inlining, even if requested, and MUST do so if inlining + // would cause the response to exceed message size limits. + // Clients SHOULD NOT populate this field when uploading to the cache. + bytes stderr_raw = 7; + + // The digest for a blob containing the standard error of the action, which + // can be retrieved from the + // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. + Digest stderr_digest = 8; + + // The details of the execution that originally produced this result. + ExecutedActionMetadata execution_metadata = 9; +} + +// An `OutputFile` is similar to a +// [FileNode][build.bazel.remote.execution.v2.FileNode], but it is used as an +// output in an `ActionResult`. It allows a full file path rather than +// only a name. +message OutputFile { + // The full path of the file relative to the working directory, including the + // filename. The path separator is a forward slash `/`. Since this is a + // relative path, it MUST NOT begin with a leading forward slash. + string path = 1; + + // The digest of the file's content. + Digest digest = 2; + + reserved 3; // Used for a removed field in an earlier version of the API. + + // True if file is executable, false otherwise. + bool is_executable = 4; + + // The contents of the file if inlining was requested. The server SHOULD NOT inline + // file contents unless requested by the client in the + // [GetActionResultRequest][build.bazel.remote.execution.v2.GetActionResultRequest] + // message. The server MAY omit inlining, even if requested, and MUST do so if inlining + // would cause the response to exceed message size limits. 
+  // Clients SHOULD NOT populate this field when uploading to the cache.
+  bytes contents = 5;
+
+  // The supported node properties of the OutputFile, if requested by the Action.
+  reserved 6;
+  NodeProperties node_properties = 7;
+}
+
+// A `Tree` contains all the
+// [Directory][build.bazel.remote.execution.v2.Directory] protos in a
+// single directory Merkle tree, compressed into one message.
+message Tree {
+  // The root directory in the tree.
+  Directory root = 1;
+
+  // All the child directories: the directories referred to by the root and,
+  // recursively, all its children. In order to reconstruct the directory tree,
+  // the client must take the digests of each of the child directories and then
+  // build up a tree starting from the `root`.
+  // Servers SHOULD ensure that these are ordered consistently such that two
+  // actions producing equivalent output directories on the same server
+  // implementation also produce Tree messages with matching digests.
+  repeated Directory children = 2;
+}
+
+// An `OutputDirectory` is the output in an `ActionResult` corresponding to a
+// directory's full contents rather than a single file.
+message OutputDirectory {
+  // The full path of the directory relative to the working directory. The path
+  // separator is a forward slash `/`. Since this is a relative path, it MUST
+  // NOT begin with a leading forward slash. The empty string value is allowed,
+  // and it denotes the entire working directory.
+  string path = 1;
+
+  reserved 2; // Used for a removed field in an earlier version of the API.
+
+  // The digest of the encoded
+  // [Tree][build.bazel.remote.execution.v2.Tree] proto containing the
+  // directory's contents.
+  Digest tree_digest = 3;
+
+  // If set, consumers MAY make the following assumptions about the
+  // directories contained in the Tree, so that it may be
+  // instantiated on a local file system by scanning through it
+  // sequentially:
+  //
+  // - All directories with the same binary representation are stored
+  //   exactly once.
+  // - All directories, apart from the root directory, are referenced by
+  //   at least one parent directory.
+  // - Directories are stored in topological order, with parents being
+  //   stored before the child. The root directory is thus the first to
+  //   be stored.
+  //
+  // Additionally, the Tree MUST be encoded as a stream of records,
+  // where each record has the following format:
+  //
+  // - A tag byte, having one of the following two values:
+  //   - (1 << 3) | 2 == 0x0a: First record (the root directory).
+  //   - (2 << 3) | 2 == 0x12: Any subsequent records (child directories).
+  // - The size of the directory, encoded as a base 128 varint.
+  // - The contents of the directory, encoded as a binary serialized
+  //   Protobuf message.
+  //
+  // This encoding is a subset of the Protobuf wire format of the Tree
+  // message. As it is only permitted to store data associated with
+  // field numbers 1 and 2, the tag MUST be encoded as a single byte.
+  // More details on the Protobuf wire format can be found here:
+  // https://developers.google.com/protocol-buffers/docs/encoding
+  //
+  // It is recommended that implementations using this feature construct
+  // Tree objects manually using the specification given above, as
+  // opposed to using a Protobuf library to marshal a full Tree message.
+  // As individual Directory messages already need to be marshaled to
+  // compute their digests, constructing the Tree object manually avoids
+  // redundant marshaling.
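+  //
+  // As a rough, non-normative sketch: a topologically sorted Tree with root
+  // directory R and child directories C1 and C2 would be stored as the
+  // concatenation of the records
+  //
+  //   0x0a <varint size of R>  <serialized R>
+  //   0x12 <varint size of C1> <serialized C1>
+  //   0x12 <varint size of C2> <serialized C2>
+  //
+  // which is itself a valid Protobuf wire encoding of the Tree message.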
+ bool is_topologically_sorted = 4; +} + +// An `OutputSymlink` is similar to a +// [Symlink][build.bazel.remote.execution.v2.SymlinkNode], but it is used as an +// output in an `ActionResult`. +// +// `OutputSymlink` is binary-compatible with `SymlinkNode`. +message OutputSymlink { + // The full path of the symlink relative to the working directory, including the + // filename. The path separator is a forward slash `/`. Since this is a + // relative path, it MUST NOT begin with a leading forward slash. + string path = 1; + + // The target path of the symlink. The path separator is a forward slash `/`. + // The target path can be relative to the parent directory of the symlink or + // it can be an absolute path starting with `/`. Support for absolute paths + // can be checked using the [Capabilities][build.bazel.remote.execution.v2.Capabilities] + // API. `..` components are allowed anywhere in the target path. + string target = 2; + + // The supported node properties of the OutputSymlink, if requested by the + // Action. + reserved 3; + NodeProperties node_properties = 4; +} + +// An `ExecutionPolicy` can be used to control the scheduling of the action. +message ExecutionPolicy { + // The priority (relative importance) of this action. Generally, a lower value + // means that the action should be run sooner than actions having a greater + // priority value, but the interpretation of a given value is server- + // dependent. A priority of 0 means the *default* priority. Priorities may be + // positive or negative, and such actions should run later or sooner than + // actions having the default priority, respectively. The particular semantics + // of this field is up to the server. In particular, every server will have + // their own supported range of priorities, and will decide how these map into + // scheduling policy. + int32 priority = 1; +} + +// A `ResultsCachePolicy` is used for fine-grained control over how action +// outputs are stored in the CAS and Action Cache. +message ResultsCachePolicy { + // The priority (relative importance) of this content in the overall cache. + // Generally, a lower value means a longer retention time or other advantage, + // but the interpretation of a given value is server-dependent. A priority of + // 0 means a *default* value, decided by the server. + // + // The particular semantics of this field is up to the server. In particular, + // every server will have their own supported range of priorities, and will + // decide how these map into retention/eviction policy. + int32 priority = 1; +} + +// A request message for +// [Execution.Execute][build.bazel.remote.execution.v2.Execution.Execute]. +message ExecuteRequest { + // The instance of the execution system to operate against. A server may + // support multiple instances of the execution system (with their own workers, + // storage, caches, etc.). The server MAY require use of this field to select + // between them in an implementation-defined fashion, otherwise it can be + // omitted. + string instance_name = 1; + + // If true, the action will be executed even if its result is already + // present in the [ActionCache][build.bazel.remote.execution.v2.ActionCache]. + // The execution is still allowed to be merged with other in-flight executions + // of the same action, however - semantically, the service MUST only guarantee + // that the results of an execution with this field set were not visible + // before the corresponding execution request was sent. 
+  // Note that actions from execution requests with this field set are still
+  // eligible to be entered into the action cache upon completion, and services
+  // SHOULD overwrite any existing entries that may exist. This allows
+  // skip_cache_lookup requests to be used as a mechanism for replacing action
+  // cache entries that reference outputs no longer available or that are
+  // poisoned in any way.
+  // If false, the result may be served from the action cache.
+  bool skip_cache_lookup = 3;
+
+  reserved 2, 4, 5; // Used for removed fields in an earlier version of the API.
+
+  // The digest of the [Action][build.bazel.remote.execution.v2.Action] to
+  // execute.
+  Digest action_digest = 6;
+
+  // An optional policy for execution of the action.
+  // The server will have a default policy if this is not provided.
+  ExecutionPolicy execution_policy = 7;
+
+  // An optional policy for the results of this execution in the remote cache.
+  // The server will have a default policy if this is not provided.
+  // This may be applied to both the ActionResult and the associated blobs.
+  ResultsCachePolicy results_cache_policy = 8;
+
+  // The digest function that was used to compute the action digest.
+  //
+  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
+  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
+  // that case the server SHOULD infer the digest function using the
+  // length of the action digest hash and the digest functions announced
+  // in the server's capabilities.
+  DigestFunction.Value digest_function = 9;
+}
+
+// A `LogFile` is a log stored in the CAS.
+message LogFile {
+  // The digest of the log contents.
+  Digest digest = 1;
+
+  // This is a hint as to the purpose of the log, and is set to true if the log
+  // is human-readable text that can be usefully displayed to a user, and false
+  // otherwise. For instance, if a command-line client wishes to print the
+  // server logs to the terminal for a failed action, this allows it to avoid
+  // displaying a binary file.
+  bool human_readable = 2;
+}
+
+// The response message for
+// [Execution.Execute][build.bazel.remote.execution.v2.Execution.Execute],
+// which will be contained in the [response
+// field][google.longrunning.Operation.response] of the
+// [Operation][google.longrunning.Operation].
+message ExecuteResponse {
+  // The result of the action.
+  ActionResult result = 1;
+
+  // True if the result was served from cache, false if it was executed.
+  bool cached_result = 2;
+
+  // If the status has a code other than `OK`, it indicates that the action did
+  // not finish execution. For example, if the operation times out during
+  // execution, the status will have a `DEADLINE_EXCEEDED` code. Servers MUST
+  // use this field for errors in execution, rather than the error field on the
+  // `Operation` object.
+  //
+  // If the status code is other than `OK`, then the result MUST NOT be cached.
+  // For an error status, the `result` field is optional; the server may
+  // populate the output-, stdout-, and stderr-related fields if it has any
+  // information available, such as the stdout and stderr of a timed-out action.
+  google.rpc.Status status = 3;
+
+  // An optional list of additional log outputs the server wishes to provide. A
+  // server can use this to return execution-specific logs however it wishes.
+  // This is intended primarily to make it easier for users to debug issues that
+  // may be outside of the actual job execution, such as by identifying the
+  // worker executing the action or by providing logs from the worker's setup
+  // phase. The keys SHOULD be human readable so that a client can display them
+  // to a user.
+  map<string, LogFile> server_logs = 4;
+
+  // Freeform informational message with details on the execution of the action
+  // that may be displayed to the user upon failure or when requested explicitly.
+  string message = 5;
+}
+
+// The current stage of action execution.
+//
+// Even though these stages are numbered according to the order in which
+// they generally occur, there is no requirement that the remote
+// execution system reports events along this order. For example, an
+// operation MAY transition from the EXECUTING stage back to QUEUED
+// in case the hardware on which the operation executes fails.
+//
+// If and only if the remote execution system reports that an operation
+// has reached the COMPLETED stage, it MUST set the [done
+// field][google.longrunning.Operation.done] of the
+// [Operation][google.longrunning.Operation] and terminate the stream.
+message ExecutionStage {
+  enum Value {
+    // Invalid value.
+    UNKNOWN = 0;
+
+    // Checking the result against the cache.
+    CACHE_CHECK = 1;
+
+    // Currently idle, awaiting a free machine to execute.
+    QUEUED = 2;
+
+    // Currently being executed by a worker.
+    EXECUTING = 3;
+
+    // Finished execution.
+    COMPLETED = 4;
+  }
+}
+
+// Metadata about an ongoing
+// [execution][build.bazel.remote.execution.v2.Execution.Execute], which
+// will be contained in the [metadata
+// field][google.longrunning.Operation.metadata] of the
+// [Operation][google.longrunning.Operation].
+message ExecuteOperationMetadata {
+  // The current stage of execution.
+  ExecutionStage.Value stage = 1;
+
+  // The digest of the [Action][build.bazel.remote.execution.v2.Action]
+  // being executed.
+  Digest action_digest = 2;
+
+  // If set, the client can use this resource name with
+  // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the
+  // standard output from the endpoint hosting streamed responses.
+  string stdout_stream_name = 3;
+
+  // If set, the client can use this resource name with
+  // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the
+  // standard error from the endpoint hosting streamed responses.
+  string stderr_stream_name = 4;
+
+  // The client can read this field to view details about the ongoing
+  // execution.
+  ExecutedActionMetadata partial_execution_metadata = 5;
+}
+
+// A request message for
+// [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution].
+message WaitExecutionRequest {
+  // The name of the [Operation][google.longrunning.Operation]
+  // returned by [Execute][build.bazel.remote.execution.v2.Execution.Execute].
+  string name = 1;
+}
+
+// A request message for
+// [ActionCache.GetActionResult][build.bazel.remote.execution.v2.ActionCache.GetActionResult].
+message GetActionResultRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // The digest of the [Action][build.bazel.remote.execution.v2.Action]
+  // whose result is requested.
+ Digest action_digest = 2; + + // A hint to the server to request inlining stdout in the + // [ActionResult][build.bazel.remote.execution.v2.ActionResult] message. + bool inline_stdout = 3; + + // A hint to the server to request inlining stderr in the + // [ActionResult][build.bazel.remote.execution.v2.ActionResult] message. + bool inline_stderr = 4; + + // A hint to the server to inline the contents of the listed output files. + // Each path needs to exactly match one file path in either `output_paths` or + // `output_files` (DEPRECATED since v2.1) in the + // [Command][build.bazel.remote.execution.v2.Command] message. + repeated string inline_output_files = 5; + + // The digest function that was used to compute the action digest. + // + // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256, + // SHA384, SHA512, or VSO, the client MAY leave this field unset. In + // that case the server SHOULD infer the digest function using the + // length of the action digest hash and the digest functions announced + // in the server's capabilities. + DigestFunction.Value digest_function = 6; +} + +// A request message for +// [ActionCache.UpdateActionResult][build.bazel.remote.execution.v2.ActionCache.UpdateActionResult]. +message UpdateActionResultRequest { + // The instance of the execution system to operate against. A server may + // support multiple instances of the execution system (with their own workers, + // storage, caches, etc.). The server MAY require use of this field to select + // between them in an implementation-defined fashion, otherwise it can be + // omitted. + string instance_name = 1; + + // The digest of the [Action][build.bazel.remote.execution.v2.Action] + // whose result is being uploaded. + Digest action_digest = 2; + + // The [ActionResult][build.bazel.remote.execution.v2.ActionResult] + // to store in the cache. + ActionResult action_result = 3; + + // An optional policy for the results of this execution in the remote cache. + // The server will have a default policy if this is not provided. + // This may be applied to both the ActionResult and the associated blobs. + ResultsCachePolicy results_cache_policy = 4; + + // The digest function that was used to compute the action digest. + // + // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256, + // SHA384, SHA512, or VSO, the client MAY leave this field unset. In + // that case the server SHOULD infer the digest function using the + // length of the action digest hash and the digest functions announced + // in the server's capabilities. + DigestFunction.Value digest_function = 5; +} + +// A request message for +// [ContentAddressableStorage.FindMissingBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs]. +message FindMissingBlobsRequest { + // The instance of the execution system to operate against. A server may + // support multiple instances of the execution system (with their own workers, + // storage, caches, etc.). The server MAY require use of this field to select + // between them in an implementation-defined fashion, otherwise it can be + // omitted. + string instance_name = 1; + + // A list of the blobs to check. All digests MUST use the same digest + // function. + repeated Digest blob_digests = 2; + + // The digest function of the blobs whose existence is checked. + // + // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256, + // SHA384, SHA512, or VSO, the client MAY leave this field unset. 
+  // In that case the server SHOULD infer the digest function using the
+  // length of the blob digest hashes and the digest functions announced
+  // in the server's capabilities.
+  DigestFunction.Value digest_function = 3;
+}
+
+// A response message for
+// [ContentAddressableStorage.FindMissingBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs].
+message FindMissingBlobsResponse {
+  // A list of the requested blobs that are *not* present in the storage.
+  repeated Digest missing_blob_digests = 2;
+}
+
+// A request message for
+// [ContentAddressableStorage.BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+message BatchUpdateBlobsRequest {
+  // A request corresponding to a single blob that the client wants to upload.
+  message Request {
+    // The digest of the blob. This MUST be the digest of `data`. All
+    // digests MUST use the same digest function.
+    Digest digest = 1;
+
+    // The raw binary data.
+    bytes data = 2;
+
+    // The format of `data`. Must be `IDENTITY`/unspecified, or one of the
+    // compressors advertised by the
+    // [CacheCapabilities.supported_batch_compressors][build.bazel.remote.execution.v2.CacheCapabilities.supported_batch_compressors]
+    // field.
+    Compressor.Value compressor = 3;
+  }
+
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // The individual upload requests.
+  repeated Request requests = 2;
+
+  // The digest function that was used to compute the digests of the
+  // blobs being uploaded.
+  //
+  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
+  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
+  // that case the server SHOULD infer the digest function using the
+  // length of the blob digest hashes and the digest functions announced
+  // in the server's capabilities.
+  DigestFunction.Value digest_function = 5;
+}
+
+// A response message for
+// [ContentAddressableStorage.BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
+message BatchUpdateBlobsResponse {
+  // A response corresponding to a single blob that the client tried to upload.
+  message Response {
+    // The blob digest to which this response corresponds.
+    Digest digest = 1;
+
+    // The result of attempting to upload that blob.
+    google.rpc.Status status = 2;
+  }
+
+  // The responses to the requests.
+  repeated Response responses = 1;
+}
+
+// A request message for
+// [ContentAddressableStorage.BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs].
+message BatchReadBlobsRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // The individual blob digests. All digests MUST use the same digest
+  // function.
+  repeated Digest digests = 2;
+
+  // A list of acceptable encodings for the returned inlined data, in no
+  // particular order. `IDENTITY` is always allowed even if not specified here.
+  repeated Compressor.Value acceptable_compressors = 3;
+
+  // The digest function of the blobs being requested.
+  //
+  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
+  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
+  // that case the server SHOULD infer the digest function using the
+  // length of the blob digest hashes and the digest functions announced
+  // in the server's capabilities.
+  DigestFunction.Value digest_function = 4;
+}
+
+// A response message for
+// [ContentAddressableStorage.BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs].
+message BatchReadBlobsResponse {
+  // A response corresponding to a single blob that the client tried to download.
+  message Response {
+    // The digest to which this response corresponds.
+    Digest digest = 1;
+
+    // The raw binary data.
+    bytes data = 2;
+
+    // The format the data is encoded in. MUST be `IDENTITY`/unspecified,
+    // or one of the acceptable compressors specified in the `BatchReadBlobsRequest`.
+    Compressor.Value compressor = 4;
+
+    // The result of attempting to download that blob.
+    google.rpc.Status status = 3;
+  }
+
+  // The responses to the requests.
+  repeated Response responses = 1;
+}
+
+// A request message for
+// [ContentAddressableStorage.GetTree][build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree].
+message GetTreeRequest {
+  // The instance of the execution system to operate against. A server may
+  // support multiple instances of the execution system (with their own workers,
+  // storage, caches, etc.). The server MAY require use of this field to select
+  // between them in an implementation-defined fashion, otherwise it can be
+  // omitted.
+  string instance_name = 1;
+
+  // The digest of the root, which must be an encoded
+  // [Directory][build.bazel.remote.execution.v2.Directory] message
+  // stored in the
+  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
+  Digest root_digest = 2;
+
+  // A maximum page size to request. If present, the server will return no more
+  // than this many items. Regardless of whether a page size is specified, the
+  // server may place its own limit on the number of items to be returned and
+  // require the client to retrieve more items using a subsequent request.
+  int32 page_size = 3;
+
+  // A page token, which must be a value received in a previous
+  // [GetTreeResponse][build.bazel.remote.execution.v2.GetTreeResponse].
+  // If present, the server will use that token as an offset, returning only
+  // that page and the ones that succeed it.
+  string page_token = 4;
+
+  // The digest function that was used to compute the digest of the root
+  // directory.
+  //
+  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
+  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
+  // that case the server SHOULD infer the digest function using the
+  // length of the root digest hash and the digest functions announced
+  // in the server's capabilities.
+  DigestFunction.Value digest_function = 5;
+}
+
+// A response message for
+// [ContentAddressableStorage.GetTree][build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree].
+message GetTreeResponse {
+  // The directories descended from the requested root.
+ repeated Directory directories = 1; + + // If present, signifies that there are more results which the client can + // retrieve by passing this as the page_token in a subsequent + // [request][build.bazel.remote.execution.v2.GetTreeRequest]. + // If empty, signifies that this is the last page of results. + string next_page_token = 2; +} + +// A request message for +// [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities]. +message GetCapabilitiesRequest { + // The instance of the execution system to operate against. A server may + // support multiple instances of the execution system (with their own workers, + // storage, caches, etc.). The server MAY require use of this field to select + // between them in an implementation-defined fashion, otherwise it can be + // omitted. + string instance_name = 1; +} + +// A response message for +// [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities]. +message ServerCapabilities { + // Capabilities of the remote cache system. + CacheCapabilities cache_capabilities = 1; + + // Capabilities of the remote execution system. + ExecutionCapabilities execution_capabilities = 2; + + // Earliest RE API version supported, including deprecated versions. + build.bazel.semver.SemVer deprecated_api_version = 3; + + // Earliest non-deprecated RE API version supported. + build.bazel.semver.SemVer low_api_version = 4; + + // Latest RE API version supported. + build.bazel.semver.SemVer high_api_version = 5; +} + +// The digest function used for converting values into keys for CAS and Action +// Cache. +message DigestFunction { + enum Value { + // It is an error for the server to return this value. + UNKNOWN = 0; + + // The SHA-256 digest function. + SHA256 = 1; + + // The SHA-1 digest function. + SHA1 = 2; + + // The MD5 digest function. + MD5 = 3; + + // The Microsoft "VSO-Hash" paged SHA256 digest function. + // See https://github.com/microsoft/BuildXL/blob/master/Documentation/Specs/PagedHash.md . + VSO = 4; + + // The SHA-384 digest function. + SHA384 = 5; + + // The SHA-512 digest function. + SHA512 = 6; + + // Murmur3 128-bit digest function, x64 variant. Note that this is not a + // cryptographic hash function and its collision properties are not strongly guaranteed. + // See https://github.com/aappleby/smhasher/wiki/MurmurHash3 . + MURMUR3 = 7; + + // The SHA-256 digest function, modified to use a Merkle tree for + // large objects. This permits implementations to store large blobs + // as a decomposed sequence of 2^j sized chunks, where j >= 10, + // while being able to validate integrity at the chunk level. + // + // Furthermore, on systems that do not offer dedicated instructions + // for computing SHA-256 hashes (e.g., the Intel SHA and ARMv8 + // cryptographic extensions), SHA256TREE hashes can be computed more + // efficiently than plain SHA-256 hashes by using generic SIMD + // extensions, such as Intel AVX2 or ARM NEON. + // + // SHA256TREE hashes are computed as follows: + // + // - For blobs that are 1024 bytes or smaller, the hash is computed + // using the regular SHA-256 digest function. + // + // - For blobs that are more than 1024 bytes in size, the hash is + // computed as follows: + // + // 1. The blob is partitioned into a left (leading) and right + // (trailing) blob. These blobs have lengths m and n + // respectively, where m = 2^k and 0 < n <= m. + // + // 2. 
Hashes of the left and right blob, Hash(left) and
+ // Hash(right) respectively, are computed by recursively
+ // applying the SHA256TREE algorithm.
+ //
+ // 3. A single invocation is made to the SHA-256 block cipher with
+ // the following parameters:
+ //
+ // M = Hash(left) || Hash(right)
+ // H = {
+ // 0xcbbb9d5d, 0x629a292a, 0x9159015a, 0x152fecd8,
+ // 0x67332667, 0x8eb44a87, 0xdb0c2e0d, 0x47b5481d,
+ // }
+ //
+ // The values of H are the leading fractional parts of the
+ // square roots of the 9th to the 16th prime number (23 to 53).
+ // This differs from plain SHA-256, where the first eight prime
+ // numbers (2 to 19) are used, thereby preventing trivial hash
+ // collisions between small and large objects.
+ //
+ // 4. The hash of the full blob can then be obtained by
+ // concatenating the outputs of the block cipher:
+ //
+ // Hash(blob) = a || b || c || d || e || f || g || h
+ //
+ // Addition of the original values of H, as normally done
+ // through the use of the Davies-Meyer structure, is not
+ // performed. This isn't necessary, as the block cipher is only
+ // invoked once.
+ //
+ // Test vectors of this digest function can be found in the
+ // accompanying sha256tree_test_vectors.txt file.
+ SHA256TREE = 8;
+
+ // The BLAKE3 hash function.
+ // See https://github.com/BLAKE3-team/BLAKE3.
+ BLAKE3 = 9;
+ }
+}
+
+// Describes the server/instance capabilities for updating the action cache.
+message ActionCacheUpdateCapabilities {
+ bool update_enabled = 1;
+}
+
+// Allowed values for priority in
+// [ResultsCachePolicy][build.bazel.remote.execution.v2.ResultsCachePolicy] and
+// [ExecutionPolicy][build.bazel.remote.execution.v2.ExecutionPolicy].
+// Used for querying both cache and execution valid priority ranges.
+message PriorityCapabilities {
+ // Supported range of priorities, including boundaries.
+ message PriorityRange {
+ // The minimum numeric value for this priority range, which represents the
+ // most urgent task or longest retained item.
+ int32 min_priority = 1;
+ // The maximum numeric value for this priority range, which represents the
+ // least urgent task or shortest retained item.
+ int32 max_priority = 2;
+ }
+ repeated PriorityRange priorities = 1;
+}
+
+// Describes how the server treats absolute symlink targets.
+message SymlinkAbsolutePathStrategy {
+ enum Value {
+ // Invalid value.
+ UNKNOWN = 0;
+
+ // Server will return an `INVALID_ARGUMENT` on input symlinks with absolute
+ // targets.
+ // If an action tries to create an output symlink with an absolute target, a
+ // `FAILED_PRECONDITION` will be returned.
+ DISALLOWED = 1;
+
+ // Server will allow symlink targets to escape the input root tree, possibly
+ // resulting in non-hermetic builds.
+ ALLOWED = 2;
+ }
+}
+
+// Compression formats which may be supported.
+message Compressor {
+ enum Value {
+ // No compression. Servers and clients MUST always support this, and do
+ // not need to advertise it.
+ IDENTITY = 0;
+
+ // Zstandard compression.
+ ZSTD = 1;
+
+ // RFC 1951 Deflate. This format is identical to what is used by ZIP
+ // files. Headers such as the one generated by gzip are not
+ // included.
+ //
+ // It is advised to use algorithms such as Zstandard instead, as
+ // those are faster and/or provide a better compression ratio.
+ DEFLATE = 2;
+
+ // Brotli compression.
+ BROTLI = 3;
+ }
+}
+
+// Capabilities of the remote cache system.
+message CacheCapabilities {
+ // All the digest functions supported by the remote cache.
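+ //
+ // (A minimal Go sketch of the length-based inference described on the
+ // request-side digest_function fields, to be resolved against this
+ // announced list; `repb` is an assumed import name for the generated
+ // package, and VSO hashes are assumed to be 33 bytes, i.e. 66 hex chars:
+ //
+ //   func inferDigestFunction(hexHash string) repb.DigestFunction_Value {
+ //     switch len(hexHash) {
+ //     case 32: // ambiguous: MD5 or MURMUR3; consult digest_functions
+ //       return repb.DigestFunction_MD5
+ //     case 40:
+ //       return repb.DigestFunction_SHA1
+ //     case 64:
+ //       return repb.DigestFunction_SHA256
+ //     case 66:
+ //       return repb.DigestFunction_VSO
+ //     case 96:
+ //       return repb.DigestFunction_SHA384
+ //     case 128:
+ //       return repb.DigestFunction_SHA512
+ //     }
+ //     return repb.DigestFunction_UNKNOWN
+ //   }
+ // )
+ //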
+ // Remote cache may support multiple digest functions simultaneously. + repeated DigestFunction.Value digest_functions = 1; + + // Capabilities for updating the action cache. + ActionCacheUpdateCapabilities action_cache_update_capabilities = 2; + + // Supported cache priority range for both CAS and ActionCache. + PriorityCapabilities cache_priority_capabilities = 3; + + // Maximum total size of blobs to be uploaded/downloaded using + // batch methods. A value of 0 means no limit is set, although + // in practice there will always be a message size limitation + // of the protocol in use, e.g. GRPC. + int64 max_batch_total_size_bytes = 4; + + // Whether absolute symlink targets are supported. + SymlinkAbsolutePathStrategy.Value symlink_absolute_path_strategy = 5; + + // Compressors supported by the "compressed-blobs" bytestream resources. + // Servers MUST support identity/no-compression, even if it is not listed + // here. + // + // Note that this does not imply which if any compressors are supported by + // the server at the gRPC level. + repeated Compressor.Value supported_compressors = 6; + + // Compressors supported for inlined data in + // [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs] + // requests. + repeated Compressor.Value supported_batch_update_compressors = 7; +} + +// Capabilities of the remote execution system. +message ExecutionCapabilities { + // Legacy field for indicating which digest function is supported by the + // remote execution system. It MUST be set to a value other than UNKNOWN. + // Implementations should consider the repeated digest_functions field + // first, falling back to this singular field if digest_functions is unset. + DigestFunction.Value digest_function = 1; + + // Whether remote execution is enabled for the particular server/instance. + bool exec_enabled = 2; + + // Supported execution priority range. + PriorityCapabilities execution_priority_capabilities = 3; + + // Supported node properties. + repeated string supported_node_properties = 4; + + // All the digest functions supported by the remote execution system. + // If this field is set, it MUST also contain digest_function. + // + // Even if the remote execution system announces support for multiple + // digest functions, individual execution requests may only reference + // CAS objects using a single digest function. For example, it is not + // permitted to execute actions having both MD5 and SHA-256 hashed + // files in their input root. + // + // The CAS objects referenced by action results generated by the + // remote execution system MUST use the same digest function as the + // one used to construct the action. + repeated DigestFunction.Value digest_functions = 5; +} + +// Details for the tool used to call the API. +message ToolDetails { + // Name of the tool, e.g. bazel. + string tool_name = 1; + + // Version of the tool used for the request, e.g. 5.0.3. + string tool_version = 2; +} + +// An optional Metadata to attach to any RPC request to tell the server about an +// external context of the request. The server may use this for logging or other +// purposes. To use it, the client attaches the header to the call using the +// canonical proto serialization: +// +// * name: `build.bazel.remote.execution.v2.requestmetadata-bin` +// * contents: the base64 encoded binary `RequestMetadata` message. 
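+//
+// For example (see the note below about base64 handling), a grpc-go client
+// might attach it as follows. This is a minimal sketch, assuming the
+// generated Go package is imported as `repb` and `invocationID` is defined
+// by the caller; grpc-go base64-encodes "-bin" metadata automatically, so
+// the raw serialized bytes can be passed directly:
+//
+//   b, _ := proto.Marshal(&repb.RequestMetadata{
+//     ToolDetails:      &repb.ToolDetails{ToolName: "bazel"},
+//     ToolInvocationId: invocationID,
+//   })
+//   ctx = metadata.AppendToOutgoingContext(ctx,
+//     "build.bazel.remote.execution.v2.requestmetadata-bin", string(b))
+//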
+// Note: the gRPC library serializes binary headers encoded in base 64 by
+// default (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests).
+// Therefore, if the gRPC library is used to pass/retrieve this
+// metadata, the user may ignore the base64 encoding and assume it is simply
+// serialized as a binary message.
+message RequestMetadata {
+ // The details for the tool invoking the requests.
+ ToolDetails tool_details = 1;
+
+ // An identifier that ties multiple requests to the same action.
+ // For example, multiple requests to the CAS, Action Cache, and Execution
+ // API are used in order to compile foo.cc.
+ string action_id = 2;
+
+ // An identifier that ties multiple actions together to a final result.
+ // For example, multiple actions are required to build and run foo_test.
+ string tool_invocation_id = 3;
+
+ // An identifier to tie multiple tool invocations together. For example,
+ // runs of foo_test, bar_test and baz_test on a post-submit of a given patch.
+ string correlated_invocations_id = 4;
+
+ // A brief description of the kind of action, for example, CppCompile or GoLink.
+ // There is no standard agreed set of values for this, and they are expected to vary between different client tools.
+ string action_mnemonic = 5;
+
+ // An identifier for the target which produced this action.
+ // No guarantees are made around how many actions may relate to a single target.
+ string target_id = 6;
+
+ // An identifier for the configuration in which the target was built,
+ // e.g. for differentiating building host tools or different target platforms.
+ // There is no expectation that this value will have any particular structure,
+ // or equality across invocations, though some client tools may offer these guarantees.
+ string configuration_id = 7;
+
+ // BUILDBUDDY EXTENSIONS BELOW
+
+ // Details about the remote executor performing this request on behalf of the
+ // tool.
+ ExecutorDetails executor_details = 1000;
+}
+
+/******************************************************************************/
+/* */
+/* BEGIN BUILDBUDDY-SPECIFIC PROTO DECLARATIONS */
+/* */
+/******************************************************************************/
+
+// Next tag: 10
+message ExecutionTask {
+ ExecuteRequest execute_request = 1;
+ Action action = 4;
+ Command command = 5;
+ string execution_id = 6;
+ string jwt = 2;
+ string invocation_id = 3;
+ google.protobuf.Timestamp queued_timestamp = 7;
+ Platform platform_overrides = 8;
+ RequestMetadata request_metadata = 9;
+}
+
+// ScheduledTask encapsulates a task based on a client's ExecuteRequest as well
+// as the computed scheduling metadata.
+message ScheduledTask {
+ ExecutionTask execution_task = 1;
+ scheduler.SchedulingMetadata scheduling_metadata = 2;
+}
+
+message ExecutorDetails {
+ // Unique ID of the host this executor is running on.
+ string executor_host_id = 1;
+}
+
+message IOStats {
+ // The number of files downloaded in this tree.
+ int64 file_download_count = 1;
+
+ // The total size of downloaded data.
+ int64 file_download_size_bytes = 2;
+
+ // The time taken to download the tree.
+ int64 file_download_duration_usec = 3;
+
+ // Total number of inputs that were provisioned from the local cache rather
+ // than downloading from the remote cache.
+ int64 local_cache_hits = 7;
+
+ // Wall time spent linking inputs from local cache. More precisely, this
+ // measures the duration from the start time of the first link operation to
+ // the end time of the last link operation.
+ google.protobuf.Duration local_cache_link_duration = 8; + + // The number of files uploaded in this tree. + int64 file_upload_count = 4; + + // The total size of uploaded data. + int64 file_upload_size_bytes = 5; + + // The time taken to upload the tree. + int64 file_upload_duration_usec = 6; +} + +// Compute usage sampled throughout a task's execution. +message UsageStats { + // Maximum amount of memory used throughout the task's execution. + int64 peak_memory_bytes = 1; + + // Total number of CPU-nanoseconds consumed by the task. + int64 cpu_nanos = 2; + + // Most recently recorded total memory usage of the task. This field is only + // used for real-time metrics and shouldn't be used as a "summary" metric for + // the task (peak_memory_bytes is a more useful summary metric). + int64 memory_bytes = 3; + + // File system usage counts. + // + // Field names follow the df naming convention: + // https://github.com/coreutils/coreutils/blob/d5868df0d0a6bd09387ece41b62b873fd7c201f9/src/df.c#L1580-L1582 + message FileSystemUsage { + // Filesystem mount source device name. + // Example: "/dev/sda1" + string source = 1; + + // Filesystem mount target path. + // Example: "/" + string target = 2; + + // Filesystem type. + // Example: "ext4" + string fstype = 3; + + // Filesystem used bytes. + int64 used_bytes = 4; + + // Filesystem total size in bytes. + int64 total_bytes = 5; + } + + // Peak file system usage, for each mounted file system. + repeated FileSystemUsage peak_file_system_usage = 4; +} + +// Proto representation of the Execution stored in OLAP DB. Only used in +// backends. +message StoredExecution { + string group_id = 1; + int64 updated_at_usec = 2; + string execution_id = 3; + + string invocation_uuid = 4; + int64 created_at_usec = 5; + string user_id = 6; + string worker = 7; + int64 stage = 8; + + // IO Stats + int64 file_download_count = 9; + int64 file_download_size_bytes = 10; + int64 file_download_duration_usec = 11; + int64 file_upload_count = 12; + int64 file_upload_size_bytes = 13; + int64 file_upload_duration_usec = 14; + + // UsageStats + int64 peak_memory_bytes = 15; + int64 cpu_nanos = 16; + + // Task Sizing + int64 estimated_memory_bytes = 17; + int64 estimated_milli_cpu = 18; + + // ExecutedActionMetadata + int64 queued_timestamp_usec = 19; + int64 worker_start_timestamp_usec = 20; + int64 worker_completed_timestamp_usec = 21; + int64 input_fetch_start_timestamp_usec = 22; + int64 input_fetch_completed_timestamp_usec = 23; + int64 execution_start_timestamp_usec = 24; + int64 execution_completed_timestamp_usec = 25; + int64 output_upload_start_timestamp_usec = 26; + int64 output_upload_completed_timestamp_usec = 27; + + int32 invocation_link_type = 28; + + int32 status_code = 29; + int32 exit_code = 30; +} + +// Metadata associated with a firecracker VM. +message VMMetadata { + // A UUID assigned to the VM upon creation which is preserved across + // snapshots. + string vm_id = 1; + + // Represents a task executed on this VM. + message VMTask { + // The task's invocation ID. If multiple invocations are associated with the + // task (via action merging) then this will be the first invocation to have + // requested execution of this task. + string invocation_id = 1; + + // The task's execution ID. + string execution_id = 2; + + // The action digest associated with the task. The digest function used to + // compute the digest should match the one associated with the invocation. 
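+ //
+ // (For reference, an action digest is the hash and size in bytes of the
+ // serialized Action proto. A minimal Go sketch for the SHA-256 case,
+ // assuming the generated package is imported as `repb` and `action` is an
+ // assumed *repb.Action:
+ //
+ //   b, _ := proto.Marshal(action)
+ //   sum := sha256.Sum256(b)
+ //   digest := &repb.Digest{
+ //     Hash:      hex.EncodeToString(sum[:]),
+ //     SizeBytes: int64(len(b)),
+ //   }
+ // )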
+ Digest action_digest = 3;
+
+ // The digest of the execution ID, used as the AC key for the stored
+ // ExecuteResponse. The digest function used to compute the digest should
+ // match the one associated with the invocation.
+ Digest execute_response_digest = 4;
+
+ // A unique UUID assigned each time a snapshot is used.
+ // For example, even if the same execution is retried multiple times using
+ // the same snapshot key, each run will have a unique snapshot_id.
+ string snapshot_id = 5;
+ }
+
+ // The last task to execute on this VM. When resuming from snapshot, this
+ // represents the task which created the snapshot.
+ VMTask last_executed_task = 2;
+
+ // The snapshot ID of the task that is currently executing.
+ string snapshot_id = 3;
+}
diff --git a/third_party/buildbuddy/proto/resource.proto b/third_party/buildbuddy/proto/resource.proto
new file mode 100644
index 000000000..27eb00cef
--- /dev/null
+++ b/third_party/buildbuddy/proto/resource.proto
@@ -0,0 +1,45 @@
+syntax = "proto3";
+
+import "third_party/buildbuddy/proto/remote_execution.proto";
+
+package resource;
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// Stored file protos. Use caution, these protos may be stored to disk.
+//
+////////////////////////////////////////////////////////////////////////////////
+
+message ResourceName {
+ // The digest (hash + size) of this resource.
+ // Ex. 17547d520cf27d13aaaacbcf47cc33e1918ef98b7f252dfda28824b26052551/188
+ build.bazel.remote.execution.v2.Digest digest = 1;
+
+ // The namespace (remote instance name) this resource is stored in.
+ // Ex. "", "ios/1", "my_remote_instance"
+ string instance_name = 2;
+
+ // The compressor (if any) used to compress the resource data.
+ // Ex. IDENTITY, ZSTD, DEFLATE.
+ build.bazel.remote.execution.v2.Compressor.Value compressor = 3;
+
+ // The cache isolation type of this resource. For now, that can
+ // be either CAS or AC. Other cache types may exist in the future.
+ // Ex. CAS, AC
+ CacheType cache_type = 4;
+
+ // The digest function used to hash this resource and create the digest.
+ // If unset, the value is inferred from the digest.Hash length, but note
+ // that this is only correct for the following digest types:
+ // MD5, MURMUR3, SHA1, SHA256, SHA384, SHA512, VSO.
+ build.bazel.remote.execution.v2.DigestFunction.Value digest_function = 5;
+}
+
+// CacheType represents the type of cache being written to.
+enum CacheType {
+ UNKNOWN_CACHE_TYPE = 0;
+ // Action cache (AC).
+ AC = 1;
+ // Content addressable storage (CAS) cache.
+ CAS = 2;
+}
diff --git a/third_party/buildbuddy/proto/scheduler.proto b/third_party/buildbuddy/proto/scheduler.proto
new file mode 100644
index 000000000..e02dbafa8
--- /dev/null
+++ b/third_party/buildbuddy/proto/scheduler.proto
@@ -0,0 +1,319 @@
+syntax = "proto3";
+
+import "google/rpc/status.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+import "third_party/buildbuddy/proto/acl.proto";
+import "third_party/buildbuddy/proto/context.proto";
+import "third_party/buildbuddy/proto/trace.proto";
+
+package scheduler;
+
+message NodeAddress {
+ // The node's hostname. Must be reachable from the scheduler.
+ string host = 1;
+
+ // The node's port.
+ int32 port = 2;
+}
+
+message LeaseTaskRequest {
+ // The task for which to request a lease. If successful, a LeaseTaskResponse
+ // will be returned containing the serialized task and duration of the lease.
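+ //
+ // (A minimal sketch of the lease lifecycle over the bidirectional stream,
+ // in Go; `client` is a hypothetical stub generated from this file, `run`
+ // is a hypothetical helper, and error handling is elided:
+ //
+ //   stream, _ := client.LeaseTask(ctx)
+ //   stream.Send(&scpb.LeaseTaskRequest{TaskId: taskID, ExecutorId: execID})
+ //   rsp, _ := stream.Recv()
+ //   go run(rsp.GetSerializedTask()) // runs the leased task
+ //   // renew before lease_duration_seconds elapses:
+ //   stream.Send(&scpb.LeaseTaskRequest{TaskId: taskID, ExecutorId: execID})
+ //   // on completion, finalize, which deletes the task:
+ //   stream.Send(&scpb.LeaseTaskRequest{TaskId: taskID, Finalize: true})
+ // )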
+ string task_id = 1;
+
+ // Indicates that the leased task has been completed and can be deleted.
+ // Mutually exclusive with `release`.
+ bool finalize = 2;
+
+ // DEPRECATED
+ // Indicates that the lease should be released without finalizing (deleting)
+ // the task.
+ // Mutually exclusive with `finalize`.
+ bool release = 3;
+
+ // ID of the executor making the request.
+ string executor_id = 4;
+
+ // Indicates that the leased task could not be run to completion and should
+ // be re-enqueued to be retried.
+ bool re_enqueue = 5;
+ // Optional description of why the task needs to be re-enqueued (may be
+ // visible to end user).
+ google.rpc.Status re_enqueue_reason = 6;
+
+ // Indicates whether the client supports lease reconnection.
+ //
+ // When set to true, and the client is attempting to lease a task that is in
+ // "reconnecting" state, the server will use the `reconnect_token` to validate
+ // the lease attempt. If the token is invalid, the server will return a
+ // NOT_FOUND error.
+ //
+ // Otherwise (if this field is false), the server will treat tasks in
+ // "reconnecting" state the same way that it treats unclaimed tasks. This
+ // behavior ensures backwards compatibility for older executors which don't
+ // support reconnection.
+ bool supports_reconnect = 7;
+
+ // The token issued by the server when initially establishing the lease. This
+ // should be set by the client when attempting to retry a disconnected lease.
+ string reconnect_token = 8;
+}
+
+message LeaseTaskResponse {
+ // The serialized task will be set in the *first* LeaseTaskResponse returned
+ // from the server. Subsequent responses will *only* include a lease duration.
+ bytes serialized_task = 1;
+
+ // The remaining duration of this lease. To continue to hold the lease, the
+ // client *must* send another LeaseTaskRequest before time.Now() +
+ // lease_duration_seconds.
+ int64 lease_duration_seconds = 2;
+
+ // Whether or not the lease was closed cleanly.
+ bool closed_cleanly = 3;
+
+ // A token that may be used to retry the lease if it disconnects.
+ // DEPRECATED: updated executors will use the lease_id as the reconnect token.
+ string reconnect_token = 4;
+
+ // ID for this lease. The scheduler will ignore any mutation requests if the
+ // provided lease ID doesn't match the current lease ID.
+ string lease_id = 5;
+
+ // If true, indicates that the client may reclaim an existing lease by
+ // resending a LeaseTaskRequest with the same lease_id.
+ bool supports_reconnect = 6;
+}
+
+message TaskSize {
+ // The task's estimated memory usage.
+ int64 estimated_memory_bytes = 1;
+
+ // The task's estimated cpu usage.
+ int64 estimated_milli_cpu = 2;
+
+ // The task's estimated disk space requirement (beyond task inputs).
+ int64 estimated_free_disk_bytes = 3;
+}
+
+// Next ID: 10
+message SchedulingMetadata {
+ // Task size used for scheduling purposes, when the scheduler is deciding
+ // which executors (if any) may execute a task, and also when an executor is
+ // deciding which task to dequeue. Executors may see a different value of this
+ // field than what the scheduler sees, depending on measured_task_size. See
+ // documentation of that field for more info.
+ TaskSize task_size = 1;
+
+ // Task size measured from a previous task execution of a similar task, if
+ // such data is available.
+ //
+ // The scheduler may use this size to compute an adjusted `task_size` just
+ // before enqueueing a task onto an executor, but the adjusted size should not
+ // exceed the executor's limits.
+ TaskSize measured_task_size = 7;
+
+ // Task size computed via prediction model. This is only necessary when a
+ // measured task size is not available.
+ //
+ // The scheduler may use this size to compute an adjusted `task_size` just
+ // before enqueueing a task onto an executor, but the adjusted size should not
+ // exceed the executor's limits.
+ TaskSize predicted_task_size = 8;
+
+ string os = 2;
+ string arch = 3;
+ string pool = 4;
+ // Group ID that owns the executors on which the task is to be executed.
+ // May be different from the Group ID of the user that issued the Execute
+ // request.
+ string executor_group_id = 5;
+ // Group ID of the user that issued the Execute request.
+ string task_group_id = 6;
+
+ // A signal to the executor that the size of this queued task should be
+ // tracked as part of the queued-or-assigned size metrics. This is necessary
+ // because tasks may be scheduled on multiple executors, but should
+ // contribute to this system-wide metric only once, so the scheduler must
+ // inform exactly one of the executors to perform the queued task size
+ // tracking. This is for metrics purposes only and shouldn't affect the
+ // behavior of the scheduler or the executor.
+ bool track_queued_task_size = 9;
+}
+
+message ScheduleTaskRequest {
+ string task_id = 1;
+ SchedulingMetadata metadata = 2;
+ bytes serialized_task = 3;
+}
+
+message ScheduleTaskResponse {
+ // Intentionally left blank.
+}
+
+message ReEnqueueTaskRequest {
+ string task_id = 1;
+ // Optional reason for the re-enqueue (may be visible to end-user).
+ string reason = 2;
+ // Lease ID of the claim on the task. The request will be ignored if the
+ // lease ID doesn't match the current lease ID.
+ string lease_id = 3;
+}
+
+message ReEnqueueTaskResponse {
+ // Intentionally left blank.
+}
+
+message EnqueueTaskReservationRequest {
+ string task_id = 1;
+ TaskSize task_size = 2;
+ SchedulingMetadata scheduling_metadata = 3;
+
+ // If set, enqueue the task reservation on the given executor instance if it
+ // is directly connected to the scheduler that receives this request.
+ //
+ // If unset, or if there is no such connected executor, select any directly
+ // connected executor suitable for running the task.
+ //
+ // Ex. "610a4cd4-3c0f-41bb-ad72-abe933837d58"
+ string executor_id = 4;
+
+ // If set, the executor client should wait this long before making the task
+ // available for scheduling. The server will set this field when re-enqueuing
+ // tasks that are currently in "reconnecting" state, so that the client which
+ // is trying to reconnect its lease can have a short grace period during which
+ // it can retry the lease.
+ google.protobuf.Duration delay = 5;
+
+ // Used to propagate trace information from the initial Execute request.
+ // Normally trace information is automatically propagated via RPC metadata but
+ // that doesn't work for streamed task reservations since there's one
+ // long-running streaming RPC from the executor to the scheduler.
+ trace.Metadata trace_metadata = 100;
+}
+
+message EnqueueTaskReservationResponse {
+ string task_id = 1;
+}
+
+message RegisterExecutorRequest {
+ ExecutionNode node = 1;
+}
+
+message ShuttingDownRequest {
+ // Task IDs that are in the executor queue.
+ repeated string task_id = 1;
+}
+
+message RegisterAndStreamWorkRequest {
+ // Only one of the fields should be sent. Oneofs are not used due to awkward
+ // Go APIs.
+
+ // Request to register the executor with the scheduler.
+ // This message should be sent immediately after establishing the stream and
+ // be resent periodically as long as the executor should continue to receive
+ // task reservations.
+ RegisterExecutorRequest register_executor_request = 1;
+
+ // Response to a previous EnqueueTaskReservationRequest.
+ EnqueueTaskReservationResponse enqueue_task_reservation_response = 2;
+
+ // Notification to the scheduler that this executor is going away.
+ ShuttingDownRequest shutting_down_request = 3;
+}
+
+message RegisterAndStreamWorkResponse {
+ // Request to enqueue a task reservation. An EnqueueTaskReservationResponse
+ // message will be sent to ack the task reservation.
+ EnqueueTaskReservationRequest enqueue_task_reservation_request = 3;
+}
+
+service Scheduler {
+ rpc RegisterAndStreamWork(stream RegisterAndStreamWorkRequest)
+ returns (stream RegisterAndStreamWorkResponse) {}
+
+ rpc LeaseTask(stream LeaseTaskRequest) returns (stream LeaseTaskResponse) {}
+
+ rpc ScheduleTask(ScheduleTaskRequest) returns (ScheduleTaskResponse) {}
+
+ rpc ReEnqueueTask(ReEnqueueTaskRequest) returns (ReEnqueueTaskResponse) {}
+
+ // Request to enqueue a task reservation for an existing task to a locally
+ // chosen executor.
+ rpc EnqueueTaskReservation(EnqueueTaskReservationRequest)
+ returns (EnqueueTaskReservationResponse) {}
+}
+
+message ExecutionNode {
+ // Remote execution node host.
+ // Ex. "10.52.6.5"
+ string host = 1;
+
+ // Remote execution node port.
+ // Ex. 1987
+ int32 port = 2 [deprecated = true];
+
+ // Assignable memory bytes in remote execution node.
+ // Ex. 26843545600
+ int64 assignable_memory_bytes = 3;
+
+ // Assignable cpu in remote execution node.
+ // Ex. 7000
+ int64 assignable_milli_cpu = 4;
+
+ // Remote execution node operating system.
+ // Ex. "linux".
+ string os = 5;
+
+ // Architecture of the remote execution node.
+ // Ex. "amd64"
+ string arch = 6;
+
+ // Remote execution pool that this node is assigned to.
+ // Ex. "buildbuddy-executors-us-west1-b"
+ string pool = 7;
+
+ // Version of the executor binary.
+ string version = 8;
+
+ // Unique ID that identifies this executor instance within a node pool. It is
+ // set once when the executor binary starts and preserved for the lifetime of
+ // the executor. Each executor generates its own ID on startup.
+ //
+ // Ex. "34c5cf7e-b3b1-4e20-b43c-3e196b30d983"
+ string executor_id = 9;
+
+ // ID of the host this executor is running on.
+ //
+ // Ex. "8BiY6U0F"
+ string executor_host_id = 10;
+}
+
+message GetExecutionNodesRequest {
+ context.RequestContext request_context = 1;
+}
+
+message GetExecutionNodesResponse {
+ context.ResponseContext response_context = 1;
+
+ repeated Executor executor = 2;
+
+ message Executor {
+ ExecutionNode node = 1;
+
+ // Whether tasks will be routed to this node by default.
+ bool is_default = 2;
+ }
+
+ bool user_owned_executors_supported = 3;
+}
+
+// Persisted information about connected executors.
+message RegisteredExecutionNode {
+ ExecutionNode registration = 1;
+ string scheduler_host_port = 2;
+ string group_id = 3;
+ acl.ACL acl = 4;
+ google.protobuf.Timestamp last_ping_time = 5;
+}
diff --git a/third_party/buildbuddy/proto/semver.proto b/third_party/buildbuddy/proto/semver.proto
new file mode 100644
index 000000000..6eb1a401d
--- /dev/null
+++ b/third_party/buildbuddy/proto/semver.proto
@@ -0,0 +1,41 @@
+// Copyright 2018 The Bazel Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package build.bazel.semver;
+
+option csharp_namespace = "Build.Bazel.Semver";
+option go_package = "proto";
+option java_multiple_files = true;
+option java_outer_classname = "SemverProto";
+option java_package = "build.bazel.semver";
+option objc_class_prefix = "SMV";
+
+// The full version of a given tool.
+message SemVer {
+ // The major version, e.g. 10 for 10.2.3.
+ int32 major = 1;
+
+ // The minor version, e.g. 2 for 10.2.3.
+ int32 minor = 2;
+
+ // The patch version, e.g. 3 for 10.2.3.
+ int32 patch = 3;
+
+ // The pre-release version. Either this field or major/minor/patch fields
+ // must be filled. They are mutually exclusive. Pre-release versions are
+ // assumed to be earlier than any released versions.
+ string prerelease = 4;
+}
diff --git a/third_party/buildbuddy/proto/stat_filter.proto b/third_party/buildbuddy/proto/stat_filter.proto
new file mode 100644
index 000000000..5d3a6bf64
--- /dev/null
+++ b/third_party/buildbuddy/proto/stat_filter.proto
@@ -0,0 +1,39 @@
+syntax = "proto3";
+
+package stat_filter;
+
+enum InvocationMetricType {
+ UNKNOWN_INVOCATION_METRIC = 0;
+ DURATION_USEC_INVOCATION_METRIC = 1;
+ CAS_CACHE_MISSES_INVOCATION_METRIC = 2;
+ UPDATED_AT_USEC_INVOCATION_METRIC = 3;
+ CAS_CACHE_DOWNLOAD_SIZE_INVOCATION_METRIC = 4;
+ CAS_CACHE_DOWNLOAD_SPEED_INVOCATION_METRIC = 5;
+ CAS_CACHE_UPLOAD_SIZE_INVOCATION_METRIC = 6;
+ CAS_CACHE_UPLOAD_SPEED_INVOCATION_METRIC = 7;
+ ACTION_CACHE_MISSES_INVOCATION_METRIC = 8;
+ TIME_SAVED_USEC_INVOCATION_METRIC = 9;
+}
+
+enum ExecutionMetricType {
+ UNKNOWN_EXECUTION_METRIC = 0;
+ QUEUE_TIME_USEC_EXECUTION_METRIC = 1;
+ UPDATED_AT_USEC_EXECUTION_METRIC = 2;
+ INPUT_DOWNLOAD_TIME_EXECUTION_METRIC = 3;
+ REAL_EXECUTION_TIME_EXECUTION_METRIC = 4;
+ OUTPUT_UPLOAD_TIME_EXECUTION_METRIC = 5;
+ PEAK_MEMORY_EXECUTION_METRIC = 6;
+ INPUT_DOWNLOAD_SIZE_EXECUTION_METRIC = 7;
+ OUTPUT_UPLOAD_SIZE_EXECUTION_METRIC = 8;
+}
+
+message Metric {
+ optional InvocationMetricType invocation = 1;
+ optional ExecutionMetricType execution = 2;
+}
+
+message StatFilter {
+ Metric metric = 1;
+ optional int64 min = 2;
+ optional int64 max = 3;
+}
diff --git a/third_party/buildbuddy/proto/target.proto b/third_party/buildbuddy/proto/target.proto
new file mode 100644
index 000000000..385e8f508
--- /dev/null
+++ b/third_party/buildbuddy/proto/target.proto
@@ -0,0 +1,215 @@
+syntax = "proto3";
+
+import "third_party/buildbuddy/proto/api/v1/common.proto";
+import "third_party/buildbuddy/proto/context.proto";
+import "third_party/bazel/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto";
+
+package target;
+
+// A single "Target" that is part of a build.
+message TargetMetadata {
+ // The id of this target.
+ // For example: "TS12345".
+ // DEPRECATED: Use repo_url and label to identify a target.
+ string id = 1 [deprecated = true];
+
+ // The label of the target.
+ // For example: "//server/test:foo"
+ string label = 2;
+
+ // The type of the target rule.
+ // For example: java_binary
+ string rule_type = 3;
+
+ // The (enum) type of this target.
+ // For example: APPLICATION, BINARY, TEST.
+ api.v1.TargetType target_type = 4;
+
+ // The (enum) size of this target.
+ // For example: SMALL, MEDIUM, ENORMOUS.
+ api.v1.TestSize test_size = 5;
+}
+
+message TargetStatus {
+ // The invocation identifier itself.
+ string invocation_id = 1;
+
+ // The commit SHA that this invocation was for.
+ // For example: "e6a712c7c15b87ea772e13468fdbf78ecf3ed43d"
+ string commit_sha = 2;
+
+ // The aggregate status of the target. Targets can be run multiple times by
+ // bazel, which computes an "aggregate" enum status, like PASSED, FAILED, or
+ // FLAKY.
+ api.v1.Status status = 3;
+
+ // When this target started and its duration.
+ // Note: The target's start time is when the test is run; it can differ
+ // from invocation_created_at_usec when the target is cached.
+ api.v1.Timing timing = 4;
+
+ // When the invocation was created.
+ int64 invocation_created_at_usec = 5;
+}
+
+message TargetHistory {
+ // The target that was run.
+ TargetMetadata target = 1;
+
+ // The git repo the build was for.
+ // For example: "buildbuddy-io/buildbuddy"
+ string repo_url = 2;
+
+ // A list of target statuses run across a range of invocations / commits.
+ // If multiple targets were run at the same commit, the latest run will be
+ // returned.
+ repeated TargetStatus target_status = 3;
+}
+
+// NB: TargetQuery params apply to both invocations and their child targets. For
+// example, filtering to role: "CI" and target_type: TEST will only return
+// invocations that were run via CI and within each of those only targets of
+// type TEST.
+message TargetQuery {
+ // The search parameters in this query will be ANDed when performing a
+ // query -- so if a client specifies both "user" and "host", all results
+ // returned must match both fields.
+
+ // The unix-user who performed the build.
+ string user = 1;
+
+ // The host this build was executed on.
+ string host = 2;
+
+ // The git repo the build was for.
+ string repo_url = 3;
+
+ // The commit sha used for the build.
+ string commit_sha = 4;
+
+ // The role played by the build. Ex: "CI"
+ string role = 5;
+
+ // The type of target to return.
+ // For example: TEST.
+ api.v1.TargetType target_type = 6;
+
+ // The git branch the build was for.
+ string branch_name = 7;
+}
+
+message GetTargetHistoryRequest {
+ // The request context.
+ context.RequestContext request_context = 1;
+
+ // The filters to apply to this query. Required.
+ // When server_side_pagination = true, only repo_url takes effect.
+ TargetQuery query = 2;
+
+ // Return records that were run *after* this timestamp.
+ // Deprecated when server_side_pagination = true.
+ int64 start_time_usec = 3;
+
+ // Return records that were run *before* this timestamp.
+ // Deprecated when server_side_pagination = true.
+ int64 end_time_usec = 4;
+
+ // This boolean is used to roll out server-side pagination.
+ bool server_side_pagination = 5;
+
+ // The pagination token. If unset, the server returns the first page of
+ // the result.
+ string page_token = 6;
+}
+
+message GetTargetHistoryResponse {
+ // The response context.
+ context.ResponseContext response_context = 1;
+
+ // The targets and statuses that matched the query, ordered by
+ // the time they were executed, descending.
+ repeated TargetHistory invocation_targets = 2;
+
+ // Indicates if the server had to truncate results because of size. If true,
+ // the client should fetch additional time ranges working backwards from the
+ // oldest timestamp returned.
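+ //
+ // (A minimal client-side sketch of that backfill loop in Go; the stub and
+ // the `collect` and `oldestStatusUsec` helpers are hypothetical:
+ //
+ //   req := &trpb.GetTargetHistoryRequest{
+ //     Query:         q,
+ //     StartTimeUsec: startUsec,
+ //     EndTimeUsec:   endUsec,
+ //   }
+ //   for {
+ //     rsp, _ := client.GetTargetHistory(ctx, req)
+ //     collect(rsp.InvocationTargets)
+ //     if !rsp.TruncatedResults { break }
+ //     req.EndTimeUsec = oldestStatusUsec(rsp) // work backwards in time
+ //   }
+ // )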
+ bool truncated_results = 3;
+
+ // The pagination token to retrieve the next page of results.
+ string next_page_token = 4;
+}
+
+// Target details along with optional artifacts associated with the target.
+message Target {
+ // Target metadata.
+ TargetMetadata metadata = 1;
+
+ // Target status.
+ api.v1.Status status = 2;
+
+ // Target timing.
+ api.v1.Timing timing = 3;
+
+ // If the build failed, whether this target is one of the root cause targets.
+ bool root_cause = 4;
+
+ // Files associated with the target.
+ repeated build_event_stream.File files = 5;
+
+ // Test results associated with the target. The whole event is included
+ // because the ID contains useful info, such as configuration.
+ repeated build_event_stream.BuildEvent test_result_events = 6;
+
+ // Test summary associated with the target.
+ build_event_stream.TestSummary test_summary = 7;
+
+ // ActionCompleted events associated with the target.
+ repeated build_event_stream.BuildEvent action_events = 8;
+}
+
+message TargetGroup {
+ // Status of all targets in the group.
+ api.v1.Status status = 1;
+
+ // Targets in the group. If this is empty but next_page_token is set, this
+ // means that the invocation is still in progress and that more targets may be
+ // fetched later.
+ repeated Target targets = 2;
+
+ // Page token for fetching the next target group.
+ string next_page_token = 3;
+
+ // Total target count in this group across all pages.
+ int64 total_count = 4;
+}
+
+message GetTargetRequest {
+ context.RequestContext request_context = 6;
+
+ // Invocation ID to fetch targets for.
+ string invocation_id = 1;
+
+ // Optional, if nonempty, only targets with this label will be
+ // returned. If empty or unset, all targets will be returned.
+ string target_label = 2;
+
+ // Optional, if set, only targets with this status will be returned.
+ // If not set, all targets will be returned.
+ optional api.v1.Status status = 3;
+
+ string page_token = 4;
+
+ // Only return targets whose labels contain this substring (case-insensitive).
+ //
+ // When requesting the artifact listing (status 0), only return targets whose
+ // labels contain this substring or any of their file names contain this
+ // substring (case-insensitive), and if any file names are matched then
+ // restrict the file listing to just the matched files.
+ string filter = 5;
+}
+
+message GetTargetResponse {
+ context.ResponseContext response_context = 2;
+
+ repeated TargetGroup target_groups = 1;
+}
diff --git a/third_party/buildbuddy/proto/trace.proto b/third_party/buildbuddy/proto/trace.proto
new file mode 100644
index 000000000..34483c26b
--- /dev/null
+++ b/third_party/buildbuddy/proto/trace.proto
@@ -0,0 +1,7 @@
+syntax = "proto3";
+
+package trace;
+
+message Metadata {
+ map<string, string> entries = 1;
+}
diff --git a/third_party/buildbuddy/proto/user_id.proto b/third_party/buildbuddy/proto/user_id.proto
index b6e5d720d..75d2376e5 100644
--- a/third_party/buildbuddy/proto/user_id.proto
+++ b/third_party/buildbuddy/proto/user_id.proto
@@ -22,4 +22,13 @@ message DisplayUser {
 Name name = 2;
 string profile_image_url = 3;
 string email = 4;
+ AccountType account_type = 5;
+}
+
+enum AccountType {
+ UNKNOWN_USER_TYPE = 0;
+ GOOGLE = 1;
+ GITHUB = 2;
+ SAML = 3;
+ OIDC = 4;
 }