Santiago Gil pushed to branch santigl/100-server-capabilities at BuildGrid / buildgrid
Commits:
- c44ec8a6 by Martin Blanchard at 2019-02-13T11:44:11Z
- 1c60ccc1 by Martin Blanchard at 2019-02-14T13:49:54Z
- 56f129ed by Martin Blanchard at 2019-02-14T13:49:57Z
- f07c8895 by Martin Blanchard at 2019-02-14T13:50:03Z
- 4e96eb89 by Martin Blanchard at 2019-02-14T13:50:06Z
- 3720db50 by Martin Blanchard at 2019-02-14T13:50:10Z
- f768c632 by Martin Blanchard at 2019-02-14T13:50:14Z
- d0a8c0bc by Martin Blanchard at 2019-02-14T13:50:17Z
- 78efe8c6 by Santiago Gil at 2019-02-18T16:32:52Z
- ed7ab756 by Martin Blanchard at 2019-02-19T16:23:03Z
- 95f83758 by Martin Blanchard at 2019-02-19T16:23:19Z
- b9f59bf3 by Martin Blanchard at 2019-02-19T16:25:13Z
- 4c971b76 by Santiago Gil at 2019-02-20T10:46:59Z
- 4796f913 by Santiago Gil at 2019-02-20T15:24:09Z
23 changed files:
- buildgrid/_app/settings/parser.py
- buildgrid/_app/settings/reference.yml
- buildgrid/_protos/build/bazel/remote/execution/v2/remote_execution.proto
- buildgrid/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py
- buildgrid/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py
- buildgrid/_protos/build/bazel/semver/semver.proto
- buildgrid/client/cas.py
- buildgrid/server/actioncache/storage.py
- buildgrid/server/bots/instance.py
- buildgrid/server/capabilities/instance.py
- buildgrid/server/capabilities/service.py
- buildgrid/server/cas/instance.py
- buildgrid/server/controller.py
- buildgrid/server/execution/instance.py
- buildgrid/server/job.py
- buildgrid/server/operations/instance.py
- buildgrid/server/referencestorage/storage.py
- buildgrid/server/scheduler.py
- buildgrid/settings.py
- buildgrid/utils.py
- tests/integration/execution_service.py
- tests/integration/operations_service.py
- + tests/test_utils.py
Changes:
@@ -235,8 +235,8 @@ class Execution(YamlFactory):
 
     yaml_tag = u'!execution'
 
-    def __new__(cls, storage, action_cache=None):
-        return ExecutionController(action_cache, storage)
+    def __new__(cls, storage, action_cache=None, action_browser_url=None):
+        return ExecutionController(storage, action_cache, action_browser_url)
 
 
 class Action(YamlFactory):
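The factory now passes the storage backend first, with the two optional services after it. A minimal sketch of the receiving side, assuming a constructor that simply stores its arguments (not BuildGrid's actual ExecutionController internals):

    class ExecutionController:

        def __init__(self, storage, action_cache=None, action_browser_url=None):
            # Positional order matches the factory call above: storage first,
            # then the optional action cache, then the optional browser URL.
            self._storage = storage
            self._action_cache = action_cache
            self._action_browser_url = action_browser_url

    # e.g. ExecutionController(storage, action_cache, 'http://localhost:8080')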
@@ -75,7 +75,7 @@ instances:
         # Whether or not writing to the cache is allowed.
         allow-updates: true
         ##
-        # Whether failed actions (non-zero exit code) are stored
+        # Whether failed actions (non-zero exit code) are stored.
         cache-failed-actions: true
 
       - !execution
@@ -85,6 +85,9 @@ instances:
         ##
         # Alias to an action-cache service.
         action-cache: *main-action
+        ##
+        # Base URL for external build action (web) browser service.
+        action-browser-url: http://localhost:8080
 
       - !cas
         ##
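The setting only supplies a base URL; how a service derives per-action links from it is not part of the configuration contract. An illustrative helper, with a hypothetical URL scheme:

    def action_browser_link(action_browser_url, action_digest):
        # Hypothetical layout: <base-url>/action/<hash>/<size_bytes>/
        return '{}/action/{}/{}/'.format(action_browser_url.rstrip('/'),
                                         action_digest.hash,
                                         action_digest.size_bytes)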
@@ -81,6 +81,7 @@ service Execution {
   // action will be reported in the `status` field of the `ExecuteResponse`. The
   // server MUST NOT set the `error` field of the `Operation` proto.
   // The possible errors include:
+  //
   // * `INVALID_ARGUMENT`: One or more arguments are invalid.
   // * `FAILED_PRECONDITION`: One or more errors occurred in setting up the
   //     action requested, such as a missing input or command or no worker being
@@ -140,6 +141,7 @@ service ActionCache {
   // Retrieve a cached execution result.
   //
   // Errors:
+  //
   // * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
   rpc GetActionResult(GetActionResultRequest) returns (ActionResult) {
     option (google.api.http) = { get: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" };
@@ -147,11 +149,6 @@ service ActionCache {
 
   // Upload a new execution result.
   //
-  // This method is intended for servers which implement the distributed cache
-  // independently of the
-  // [Execution][build.bazel.remote.execution.v2.Execution] API. As a
-  // result, it is OPTIONAL for servers to implement.
-  //
   // In order to allow the server to perform access control based on the type of
   // action, and to assist with client debugging, the client MUST first upload
   // the [Action][build.bazel.remote.execution.v2.Execution] that produced the
@@ -160,7 +157,10 @@ service ActionCache {
   // `ContentAddressableStorage`.
   //
   // Errors:
-  // * `UNIMPLEMENTED`: This method is not supported by the server.
+  //
+  // * `INVALID_ARGUMENT`: One or more arguments are invalid.
+  // * `FAILED_PRECONDITION`: One or more errors occurred in updating the
+  //     action result, such as a missing command or action.
   // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
   //     entry to the cache.
   rpc UpdateActionResult(UpdateActionResultRequest) returns (ActionResult) {
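A hedged client-side sketch of the call whose error contract changes above, using the generated stub from remote_execution_pb2_grpc; the endpoint, instance name, and the pre-built action_digest/action_result messages are placeholders:

    import grpc

    from buildgrid._protos.build.bazel.remote.execution.v2 import (
        remote_execution_pb2, remote_execution_pb2_grpc)

    channel = grpc.insecure_channel('localhost:50051')  # placeholder endpoint
    stub = remote_execution_pb2_grpc.ActionCacheStub(channel)
    request = remote_execution_pb2.UpdateActionResultRequest(
        instance_name='main',            # placeholder instance
        action_digest=action_digest,     # Digest of the uploaded Action
        action_result=action_result)     # ActionResult to enter into the cache
    try:
        stub.UpdateActionResult(request)
    except grpc.RpcError as error:
        # INVALID_ARGUMENT, FAILED_PRECONDITION or RESOURCE_EXHAUSTED, per the
        # updated comment above.
        print(error.code(), error.details())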
@@ -207,6 +207,9 @@ service ActionCache {
 // by the server. For servers which do not support multiple instances, then the
 // `instance_name` is the empty path and the leading slash is omitted, so that
 // the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
+// To simplify parsing, a path segment cannot equal any of the following
+// keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
+// `capabilities`.
 //
 // When attempting an upload, if another client has already completed the upload
 // (which may occur in the middle of a single upload if another client uploads
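The keyword restriction is what keeps the `{instance_name=**}` prefix unambiguous. A sketch of the parsing it enables; the helper name and error handling are illustrative, not BuildGrid's parser:

    RESERVED_SEGMENTS = ('blobs', 'uploads', 'actions', 'actionResults',
                         'operations', 'capabilities')

    def split_resource_name(resource_name):
        # The instance name is everything before the first reserved keyword.
        segments = resource_name.split('/')
        for index, segment in enumerate(segments):
            if segment in RESERVED_SEGMENTS:
                return '/'.join(segments[:index]), '/'.join(segments[index:])
        raise ValueError('Invalid resource name: ' + resource_name)

    # split_resource_name('main/uploads/uuid/blobs/hash/123')
    # -> ('main', 'uploads/uuid/blobs/hash/123')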
@@ -258,10 +261,12 @@ service ContentAddressableStorage {
   // independently.
   //
   // Errors:
+  //
   // * `INVALID_ARGUMENT`: The client attempted to upload more than the
   //     server supported limit.
   //
   // Individual requests may return the following errors, additionally:
+  //
   // * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
   // * `INVALID_ARGUMENT`: The
   //     [Digest][build.bazel.remote.execution.v2.Digest] does not match the
@@ -284,6 +289,7 @@ service ContentAddressableStorage {
   // independently.
   //
   // Errors:
+  //
   // * `INVALID_ARGUMENT`: The client attempted to read more than the
   //     server supported limit.
   //
@@ -390,7 +396,8 @@ message Action {
   // immediately, rather than whenever the cache entry gets evicted.
   google.protobuf.Duration timeout = 6;
 
-  // If true, then the `Action`'s result cannot be cached.
+  // If true, then the `Action`'s result cannot be cached, and in-flight
+  // requests for the same `Action` may not be merged.
   bool do_not_cache = 7;
 }
 
@@ -421,7 +428,8 @@ message Command {
   // provide its own default environment variables; these defaults can be
   // overridden using this field. Additional variables can also be specified.
   //
-  // In order to ensure that equivalent `Command`s always hash to the same
+  // In order to ensure that equivalent
+  // [Command][build.bazel.remote.execution.v2.Command]s always hash to the same
   // value, the environment variables MUST be lexicographically sorted by name.
   // Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
   repeated EnvironmentVariable environment_variables = 2;
@@ -444,6 +452,9 @@ message Command {
   //
   // An output file cannot be duplicated, be a parent of another output file, or
   // have the same path as any of the listed output directories.
+  //
+  // Directories leading up to the output files are created by the worker prior
+  // to execution, even if they are not explicitly part of the input root.
   repeated string output_files = 3;
 
   // A list of the output directories that the client expects to retrieve from
@@ -468,6 +479,10 @@ message Command {
   //
   // An output directory cannot be duplicated or have the same path as any of
   // the listed output files.
+  //
+  // Directories leading up to the output directories (but not the output
+  // directories themselves) are created by the worker prior to execution, even
+  // if they are not explicitly part of the input root.
   repeated string output_directories = 4;
 
   // The platform requirements for the execution environment. The server MAY
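The two comments just added describe the same worker-side obligation for output files and output directories. A minimal sketch of it, assuming a staging directory and POSIX-style relative output paths (not BuildGrid's worker code):

    import os

    def create_leading_directories(staging_root, command):
        # Parents of output files, and parents of output directories (but not
        # the output directories themselves), must exist before execution.
        for path in list(command.output_files) + list(command.output_directories):
            parent = os.path.dirname(os.path.join(staging_root, path))
            os.makedirs(parent, exist_ok=True)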
@@ -531,12 +546,18 @@ message Platform {
 // In order to ensure that two equivalent directory trees hash to the same
 // value, the following restrictions MUST be obeyed when constructing a
 // a `Directory`:
-// - Every child in the directory must have a path of exactly one segment.
-//   Multiple levels of directory hierarchy may not be collapsed.
-// - Each child in the directory must have a unique path segment (file name).
-// - The files, directories and symlinks in the directory must each be sorted
-//   in lexicographical order by path. The path strings must be sorted by code
-//   point, equivalently, by UTF-8 bytes.
+//
+// * Every child in the directory must have a path of exactly one segment.
+//   Multiple levels of directory hierarchy may not be collapsed.
+// * Each child in the directory must have a unique path segment (file name).
+//   Note that while the API itself is case-sensitive, the environment where
+//   the Action is executed may or may not be case-sensitive. That is, it is
+//   legal to call the API with a Directory that has both "Foo" and "foo" as
+//   children, but the Action may be rejected by the remote system upon
+//   execution.
+// * The files, directories and symlinks in the directory must each be sorted
+//   in lexicographical order by path. The path strings must be sorted by code
+//   point, equivalently, by UTF-8 bytes.
 //
 // A `Directory` that obeys the restrictions is said to be in canonical form.
 //
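A short sketch of enforcing the sort restriction before hashing, assuming the children already have single-segment, unique names (illustrative, not the library's canonicalisation routine):

    def canonicalize(directory):
        # Sort files, directories and symlinks in place by UTF-8 code point,
        # as the last restriction above requires.
        for children in (directory.files, directory.directories,
                         directory.symlinks):
            children.sort(key=lambda node: node.name.encode('utf-8'))
        return directory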
@@ -656,11 +677,12 @@ message SymlinkNode {
 // When a `Digest` is used to refer to a proto message, it always refers to the
 // message in binary encoded form. To ensure consistent hashing, clients and
 // servers MUST ensure that they serialize messages according to the following
-// rules, even if there are alternate valid encodings for the same message.
-// - Fields are serialized in tag order.
-// - There are no unknown fields.
-// - There are no duplicate fields.
-// - Fields are serialized according to the default semantics for their type.
+// rules, even if there are alternate valid encodings for the same message:
+//
+// * Fields are serialized in tag order.
+// * There are no unknown fields.
+// * There are no duplicate fields.
+// * Fields are serialized according to the default semantics for their type.
 //
 // Most protocol buffer implementations will always follow these rules when
 // serializing, but care should be taken to avoid shortcuts. For instance,
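Under those four rules, re-serialising a freshly constructed message is deterministic, so a message digest helper can stay small. A sketch, assuming SHA-256 is the negotiated digest function and the message carries no unknown fields:

    import hashlib

    from buildgrid._protos.build.bazel.remote.execution.v2 import (
        remote_execution_pb2)

    def message_digest(message):
        blob = message.SerializeToString()
        return remote_execution_pb2.Digest(hash=hashlib.sha256(blob).hexdigest(),
                                           size_bytes=len(blob))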
@@ -727,7 +749,7 @@ message ActionResult {
   // The output files of the action that are symbolic links to other files. Those
   // may be links to other output files, or input files, or even absolute paths
   // outside of the working directory, if the server supports
-  // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.SymlinkAbsolutePathStrategy].
+  // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy].
   // For each output file requested in the `output_files` field of the Action,
   // if the corresponding file existed after
   // the action completed, a single entry will be present either in this field,
@@ -804,7 +826,7 @@ message ActionResult {
   // directories. Those may be links to other output directories, or input
   // directories, or even absolute paths outside of the working directory,
   // if the server supports
-  // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.SymlinkAbsolutePathStrategy].
+  // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy].
   // For each output directory requested in the `output_directories` field of
   // the Action, if the directory file existed after
   // the action completed, a single entry will be present either in this field,
@@ -961,9 +983,19 @@ message ExecuteRequest {
   // omitted.
   string instance_name = 1;
 
-  // If true, the action will be executed anew even if its result was already
-  // present in the cache. If false, the result may be served from the
-  // [ActionCache][build.bazel.remote.execution.v2.ActionCache].
+  // If true, the action will be executed even if its result is already
+  // present in the [ActionCache][build.bazel.remote.execution.v2.ActionCache].
+  // The execution is still allowed to be merged with other in-flight executions
+  // of the same action, however - semantically, the service MUST only guarantee
+  // that the results of an execution with this field set were not visible
+  // before the corresponding execution request was sent.
+  // Note that actions from execution requests setting this field set are still
+  // eligible to be entered into the action cache upon completion, and services
+  // SHOULD overwrite any existing entries that may exist. This allows
+  // skip_cache_lookup requests to be used as a mechanism for replacing action
+  // cache entries that reference outputs no longer available or that are
+  // poisoned in any way.
+  // If false, the result may be served from the action cache.
   bool skip_cache_lookup = 3;
 
   reserved 2, 4, 5; // Used for removed fields in an earlier version of the API.
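A request-side illustration of the semantics spelled out above (instance name and action_digest are placeholders): the flag forces a fresh execution whose result may then overwrite a stale or poisoned cache entry.

    from buildgrid._protos.build.bazel.remote.execution.v2 import (
        remote_execution_pb2)

    request = remote_execution_pb2.ExecuteRequest(
        instance_name='main',           # placeholder
        action_digest=action_digest,    # Digest of the Action to re-run
        skip_cache_lookup=True)         # bypass the ActionCache read path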
@@ -1027,6 +1059,10 @@ message ExecuteResponse {
   // phase. The keys SHOULD be human readable so that a client can display them
   // to a user.
   map<string, LogFile> server_logs = 4;
+
+  // Freeform informational message with details on the execution of the action
+  // that may be displayed to the user upon failure or when requested explicitly.
+  string message = 5;
 }
 
 // Metadata about an ongoing
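Client-side, the new field is plain string metadata. A sketch of surfacing it on failure; the reporting itself is illustrative:

    def report_execution_message(execute_response):
        # google.rpc.Status uses code 0 for OK; on any other code the freeform
        # message is the natural thing to show the user.
        if execute_response.status.code != 0 and execute_response.message:
            print(execute_response.message)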
@@ -1072,7 +1108,7 @@ message ExecuteOperationMetadata {
 // A request message for
 // [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution].
 message WaitExecutionRequest {
-  // The name of the [Operation][google.longrunning.operations.v1.Operation]
+  // The name of the [Operation][google.longrunning.Operation]
   // returned by [Execute][build.bazel.remote.execution.v2.Execution.Execute].
   string name = 1;
 }
@@ -1193,7 +1229,7 @@ message BatchReadBlobsRequest {
 // A response message for
 // [ContentAddressableStorage.BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs].
 message BatchReadBlobsResponse {
-  // A response corresponding to a single blob that the client tried to upload.
+  // A response corresponding to a single blob that the client tried to download.
   message Response {
     // The digest to which this response corresponds.
     Digest digest = 1;
@@ -1251,7 +1287,7 @@ message GetTreeResponse {
 }
 
 // A request message for
-// [Capabilities.GetCapabilities][google.devtools.remoteexecution.v2.Capabilities.GetCapabilities].
+// [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities].
 message GetCapabilitiesRequest {
   // The instance of the execution system to operate against. A server may
   // support multiple instances of the execution system (with their own workers,
@@ -1262,7 +1298,7 @@ message GetCapabilitiesRequest {
 }
 
 // A response message for
-// [Capabilities.GetCapabilities][google.devtools.remoteexecution.v2.Capabilities.GetCapabilities].
+// [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities].
 message ServerCapabilities {
   // Capabilities of the remote cache system.
   CacheCapabilities cache_capabilities = 1;
@@ -1283,9 +1319,16 @@ message ServerCapabilities {
 // The digest function used for converting values into keys for CAS and Action
 // Cache.
 enum DigestFunction {
+  // It is an error for the server to return this value.
   UNKNOWN = 0;
+
+  // The Sha-256 digest function.
   SHA256 = 1;
+
+  // The Sha-1 digest function.
   SHA1 = 2;
+
+  // The MD5 digest function.
   MD5 = 3;
 }
 
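A sketch of querying the digest functions with the generated stub (the endpoint and instance name are placeholders):

    import grpc

    from buildgrid._protos.build.bazel.remote.execution.v2 import (
        remote_execution_pb2, remote_execution_pb2_grpc)

    channel = grpc.insecure_channel('localhost:50051')  # placeholder endpoint
    stub = remote_execution_pb2_grpc.CapabilitiesStub(channel)
    capabilities = stub.GetCapabilities(
        remote_execution_pb2.GetCapabilitiesRequest(instance_name='main'))
    # DigestFunction is a file-level enum, so its values live on the module:
    assert remote_execution_pb2.SHA256 in \
        capabilities.cache_capabilities.digest_function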
@@ -1312,9 +1355,10 @@ message CacheCapabilities {
   enum SymlinkAbsolutePathStrategy {
     UNKNOWN = 0;
 
-    // Server will return an INVALID_ARGUMENT on input symlinks with absolute targets.
+    // Server will return an `INVALID_ARGUMENT` on input symlinks with absolute
+    // targets.
     // If an action tries to create an output symlink with an absolute target, a
-    // FAILED_PRECONDITION will be returned.
+    // `FAILED_PRECONDITION` will be returned.
     DISALLOWED = 1;
 
     // Server will allow symlink targets to escape the input root tree, possibly
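An illustrative pre-flight check a client might run against the advertised strategy before uploading an input symlink with an absolute target (not BuildGrid's validation code):

    import os

    from buildgrid._protos.build.bazel.remote.execution.v2 import (
        remote_execution_pb2)

    def absolute_symlink_supported(server_capabilities, target):
        strategy = (server_capabilities.cache_capabilities
                    .symlink_absolute_path_strategy)
        return (not os.path.isabs(target)
                or strategy == remote_execution_pb2.CacheCapabilities.ALLOWED)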
@@ -1367,8 +1411,9 @@ message ToolDetails {
 // external context of the request. The server may use this for logging or other
 // purposes. To use it, the client attaches the header to the call using the
 // canonical proto serialization:
-// name: build.bazel.remote.execution.v2.requestmetadata-bin
-// contents: the base64 encoded binary RequestMetadata message.
+//
+// * name: `build.bazel.remote.execution.v2.requestmetadata-bin`
+// * contents: the base64 encoded binary `RequestMetadata` message.
 message RequestMetadata {
   // The details for the tool invoking the requests.
   ToolDetails tool_details = 1;
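gRPC's Python runtime transmits metadata keys ending in `-bin` as binary and applies the base64 wire encoding itself, so attaching the header can look like the sketch below (tool name and version are placeholders):

    from buildgrid._protos.build.bazel.remote.execution.v2 import (
        remote_execution_pb2)

    request_metadata = remote_execution_pb2.RequestMetadata(
        tool_details=remote_execution_pb2.ToolDetails(tool_name='buildgrid',
                                                      tool_version='0.0.1'))
    call_metadata = [('build.bazel.remote.execution.v2.requestmetadata-bin',
                      request_metadata.SerializeToString())]
    # e.g. stub.Execute(execute_request, metadata=call_metadata)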
@@ -26,7 +26,7 @@ DESCRIPTOR = _descriptor.FileDescriptor(
   package='build.bazel.remote.execution.v2',
   syntax='proto3',
   serialized_options=_b('\n\037build.bazel.remote.execution.v2B\024RemoteExecutionProtoP\001Z\017remoteexecution\242\002\003REX\252\002\037Build.Bazel.Remote.Execution.V2'),
- serialized_pb=_b('\n6build/bazel/remote/execution/v2/remote_execution.proto\x12\x1f\x62uild.bazel.remote.execution.v2\x1a\x1f\x62uild/bazel/semver/semver.proto\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"\xd5\x01\n\x06\x41\x63tion\x12?\n\x0e\x63ommand_digest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x42\n\x11input_root_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12*\n\x07timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x14\n\x0c\x64o_not_cache\x18\x07 \x01(\x08J\x04\x08\x03\x10\x06\"\xb7\x02\n\x07\x43ommand\x12\x11\n\targuments\x18\x01 \x03(\t\x12[\n\x15\x65nvironment_variables\x18\x02 \x03(\x0b\x32<.build.bazel.remote.execution.v2.Command.EnvironmentVariable\x12\x14\n\x0coutput_files\x18\x03 \x03(\t\x12\x1a\n\x12output_directories\x18\x04 \x03(\t\x12;\n\x08platform\x18\x05 \x01(\x0b\x32).build.bazel.remote.execution.v2.Platform\x12\x19\n\x11working_directory\x18\x06 \x01(\t\x1a\x32\n\x13\x45nvironmentVariable\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"{\n\x08Platform\x12\x46\n\nproperties\x18\x01 \x03(\x0b\x32\x32.build.bazel.remote.execution.v2.Platform.Property\x1a\'\n\x08Property\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xca\x01\n\tDirectory\x12\x38\n\x05\x66iles\x18\x01 \x03(\x0b\x32).build.bazel.remote.execution.v2.FileNode\x12\x43\n\x0b\x64irectories\x18\x02 \x03(\x0b\x32..build.bazel.remote.execution.v2.DirectoryNode\x12>\n\x08symlinks\x18\x03 \x03(\x0b\x32,.build.bazel.remote.execution.v2.SymlinkNode\"n\n\x08\x46ileNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x15\n\ris_executable\x18\x04 \x01(\x08J\x04\x08\x03\x10\x04\"V\n\rDirectoryNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"+\n\x0bSymlinkNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\"*\n\x06\x44igest\x12\x0c\n\x04hash\x18\x01 \x01(\t\x12\x12\n\nsize_bytes\x18\x02 \x01(\x03\"\xec\x04\n\x16\x45xecutedActionMetadata\x12\x0e\n\x06worker\x18\x01 \x01(\t\x12\x34\n\x10queued_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12:\n\x16worker_start_timestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12>\n\x1aworker_completed_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1binput_fetch_start_timestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x43\n\x1finput_fetch_completed_timestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12=\n\x19\x65xecution_start_timestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x41\n\x1d\x65xecution_completed_timestamp\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x41\n\x1doutput_upload_start_timestamp\x18\t \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x45\n!output_upload_completed_timestamp\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xd6\x04\n\x0c\x41\x63tionResult\x12\x41\n\x0coutput_files\x18\x02 \x03(\x0b\x32+.build.bazel.remote.execution.v2.OutputFile\x12L\n\x14output_file_symlinks\x18\n \x03(\x0b\x32..build.bazel.remote.execution.v2.OutputSymlink\x12L\n\x12output_directories\x18\x03 \x03(\x0b\x32\x30.build.bazel.remote.execution.v2.OutputDirectory\x12Q\n\x19output_directory_symlinks\x18\x0b 
\x03(\x0b\x32..build.bazel.remote.execution.v2.OutputSymlink\x12\x11\n\texit_code\x18\x04 \x01(\x05\x12\x12\n\nstdout_raw\x18\x05 \x01(\x0c\x12>\n\rstdout_digest\x18\x06 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x12\n\nstderr_raw\x18\x07 \x01(\x0c\x12>\n\rstderr_digest\x18\x08 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12S\n\x12\x65xecution_metadata\x18\t \x01(\x0b\x32\x37.build.bazel.remote.execution.v2.ExecutedActionMetadataJ\x04\x08\x01\x10\x02\"p\n\nOutputFile\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x15\n\ris_executable\x18\x04 \x01(\x08J\x04\x08\x03\x10\x04\"~\n\x04Tree\x12\x38\n\x04root\x18\x01 \x01(\x0b\x32*.build.bazel.remote.execution.v2.Directory\x12<\n\x08\x63hildren\x18\x02 \x03(\x0b\x32*.build.bazel.remote.execution.v2.Directory\"c\n\x0fOutputDirectory\x12\x0c\n\x04path\x18\x01 \x01(\t\x12<\n\x0btree_digest\x18\x03 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.DigestJ\x04\x08\x02\x10\x03\"-\n\rOutputSymlink\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\"#\n\x0f\x45xecutionPolicy\x12\x10\n\x08priority\x18\x01 \x01(\x05\"&\n\x12ResultsCachePolicy\x12\x10\n\x08priority\x18\x01 \x01(\x05\"\xb3\x02\n\x0e\x45xecuteRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x19\n\x11skip_cache_lookup\x18\x03 \x01(\x08\x12>\n\raction_digest\x18\x06 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12J\n\x10\x65xecution_policy\x18\x07 \x01(\x0b\x32\x30.build.bazel.remote.execution.v2.ExecutionPolicy\x12Q\n\x14results_cache_policy\x18\x08 \x01(\x0b\x32\x33.build.bazel.remote.execution.v2.ResultsCachePolicyJ\x04\x08\x02\x10\x03J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06\"Z\n\x07LogFile\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x16\n\x0ehuman_readable\x18\x02 \x01(\x08\"\xbf\x02\n\x0f\x45xecuteResponse\x12=\n\x06result\x18\x01 \x01(\x0b\x32-.build.bazel.remote.execution.v2.ActionResult\x12\x15\n\rcached_result\x18\x02 \x01(\x08\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\x12U\n\x0bserver_logs\x18\x04 \x03(\x0b\x32@.build.bazel.remote.execution.v2.ExecuteResponse.ServerLogsEntry\x1a[\n\x0fServerLogsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x37\n\x05value\x18\x02 \x01(\x0b\x32(.build.bazel.remote.execution.v2.LogFile:\x02\x38\x01\"\xb3\x02\n\x18\x45xecuteOperationMetadata\x12N\n\x05stage\x18\x01 \x01(\x0e\x32?.build.bazel.remote.execution.v2.ExecuteOperationMetadata.Stage\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x1a\n\x12stdout_stream_name\x18\x03 \x01(\t\x12\x1a\n\x12stderr_stream_name\x18\x04 \x01(\t\"O\n\x05Stage\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0f\n\x0b\x43\x41\x43HE_CHECK\x10\x01\x12\n\n\x06QUEUED\x10\x02\x12\r\n\tEXECUTING\x10\x03\x12\r\n\tCOMPLETED\x10\x04\"$\n\x14WaitExecutionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"o\n\x16GetActionResultRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x8b\x02\n\x19UpdateActionResultRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x44\n\raction_result\x18\x03 \x01(\x0b\x32-.build.bazel.remote.execution.v2.ActionResult\x12Q\n\x14results_cache_policy\x18\x04 \x01(\x0b\x32\x33.build.bazel.remote.execution.v2.ResultsCachePolicy\"o\n\x17\x46indMissingBlobsRequest\x12\x15\n\rinstance_name\x18\x01 
\x01(\t\x12=\n\x0c\x62lob_digests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"a\n\x18\x46indMissingBlobsResponse\x12\x45\n\x14missing_blob_digests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xd6\x01\n\x17\x42\x61tchUpdateBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12R\n\x08requests\x18\x02 \x03(\x0b\x32@.build.bazel.remote.execution.v2.BatchUpdateBlobsRequest.Request\x1aP\n\x07Request\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\xda\x01\n\x18\x42\x61tchUpdateBlobsResponse\x12U\n\tresponses\x18\x01 \x03(\x0b\x32\x42.build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response\x1ag\n\x08Response\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"h\n\x15\x42\x61tchReadBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x38\n\x07\x64igests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xe4\x01\n\x16\x42\x61tchReadBlobsResponse\x12S\n\tresponses\x18\x01 \x03(\x0b\x32@.build.bazel.remote.execution.v2.BatchReadBlobsResponse.Response\x1au\n\x08Response\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\"\x8c\x01\n\x0eGetTreeRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12<\n\x0broot_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t\"k\n\x0fGetTreeResponse\x12?\n\x0b\x64irectories\x18\x01 \x03(\x0b\x32*.build.bazel.remote.execution.v2.Directory\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"/\n\x16GetCapabilitiesRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\"\xe3\x02\n\x12ServerCapabilities\x12N\n\x12\x63\x61\x63he_capabilities\x18\x01 \x01(\x0b\x32\x32.build.bazel.remote.execution.v2.CacheCapabilities\x12V\n\x16\x65xecution_capabilities\x18\x02 \x01(\x0b\x32\x36.build.bazel.remote.execution.v2.ExecutionCapabilities\x12:\n\x16\x64\x65precated_api_version\x18\x03 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\x12\x33\n\x0flow_api_version\x18\x04 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\x12\x34\n\x10high_api_version\x18\x05 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\"7\n\x1d\x41\x63tionCacheUpdateCapabilities\x12\x16\n\x0eupdate_enabled\x18\x01 \x01(\x08\"\xac\x01\n\x14PriorityCapabilities\x12W\n\npriorities\x18\x01 \x03(\x0b\x32\x43.build.bazel.remote.execution.v2.PriorityCapabilities.PriorityRange\x1a;\n\rPriorityRange\x12\x14\n\x0cmin_priority\x18\x01 \x01(\x05\x12\x14\n\x0cmax_priority\x18\x02 \x01(\x05\"\x88\x04\n\x11\x43\x61\x63heCapabilities\x12H\n\x0f\x64igest_function\x18\x01 \x03(\x0e\x32/.build.bazel.remote.execution.v2.DigestFunction\x12h\n action_cache_update_capabilities\x18\x02 \x01(\x0b\x32>.build.bazel.remote.execution.v2.ActionCacheUpdateCapabilities\x12Z\n\x1b\x63\x61\x63he_priority_capabilities\x18\x03 \x01(\x0b\x32\x35.build.bazel.remote.execution.v2.PriorityCapabilities\x12\"\n\x1amax_batch_total_size_bytes\x18\x04 \x01(\x03\x12v\n\x1esymlink_absolute_path_strategy\x18\x05 \x01(\x0e\x32N.build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy\"G\n\x1bSymlinkAbsolutePathStrategy\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0e\n\nDISALLOWED\x10\x01\x12\x0b\n\x07\x41LLOWED\x10\x02\"\xd7\x01\n\x15\x45xecutionCapabilities\x12H\n\x0f\x64igest_function\x18\x01 
\x01(\x0e\x32/.build.bazel.remote.execution.v2.DigestFunction\x12\x14\n\x0c\x65xec_enabled\x18\x02 \x01(\x08\x12^\n\x1f\x65xecution_priority_capabilities\x18\x03 \x01(\x0b\x32\x35.build.bazel.remote.execution.v2.PriorityCapabilities\"6\n\x0bToolDetails\x12\x11\n\ttool_name\x18\x01 \x01(\t\x12\x14\n\x0ctool_version\x18\x02 \x01(\t\"\xa7\x01\n\x0fRequestMetadata\x12\x42\n\x0ctool_details\x18\x01 \x01(\x0b\x32,.build.bazel.remote.execution.v2.ToolDetails\x12\x11\n\taction_id\x18\x02 \x01(\t\x12\x1a\n\x12tool_invocation_id\x18\x03 \x01(\t\x12!\n\x19\x63orrelated_invocations_id\x18\x04 \x01(\t*<\n\x0e\x44igestFunction\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06SHA256\x10\x01\x12\x08\n\x04SHA1\x10\x02\x12\x07\n\x03MD5\x10\x03\x32\xb9\x02\n\tExecution\x12\x8e\x01\n\x07\x45xecute\x12/.build.bazel.remote.execution.v2.ExecuteRequest\x1a\x1d.google.longrunning.Operation\"1\x82\xd3\xe4\x93\x02+\"&/v2/{instance_name=**}/actions:execute:\x01*0\x01\x12\x9a\x01\n\rWaitExecution\x12\x35.build.bazel.remote.execution.v2.WaitExecutionRequest\x1a\x1d.google.longrunning.Operation\"1\x82\xd3\xe4\x93\x02+\"&/v2/{name=operations/**}:waitExecution:\x01*0\x01\x32\xd6\x03\n\x0b\x41\x63tionCache\x12\xd7\x01\n\x0fGetActionResult\x12\x37.build.bazel.remote.execution.v2.GetActionResultRequest\x1a-.build.bazel.remote.execution.v2.ActionResult\"\\\x82\xd3\xe4\x93\x02V\x12T/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}\x12\xec\x01\n\x12UpdateActionResult\x12:.build.bazel.remote.execution.v2.UpdateActionResultRequest\x1a-.build.bazel.remote.execution.v2.ActionResult\"k\x82\xd3\xe4\x93\x02\x65\x1aT/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}:\raction_result2\x9b\x06\n\x19\x43ontentAddressableStorage\x12\xbc\x01\n\x10\x46indMissingBlobs\x12\x38.build.bazel.remote.execution.v2.FindMissingBlobsRequest\x1a\x39.build.bazel.remote.execution.v2.FindMissingBlobsResponse\"3\x82\xd3\xe4\x93\x02-\"(/v2/{instance_name=**}/blobs:findMissing:\x01*\x12\xbc\x01\n\x10\x42\x61tchUpdateBlobs\x12\x38.build.bazel.remote.execution.v2.BatchUpdateBlobsRequest\x1a\x39.build.bazel.remote.execution.v2.BatchUpdateBlobsResponse\"3\x82\xd3\xe4\x93\x02-\"(/v2/{instance_name=**}/blobs:batchUpdate:\x01*\x12\xb4\x01\n\x0e\x42\x61tchReadBlobs\x12\x36.build.bazel.remote.execution.v2.BatchReadBlobsRequest\x1a\x37.build.bazel.remote.execution.v2.BatchReadBlobsResponse\"1\x82\xd3\xe4\x93\x02+\"&/v2/{instance_name=**}/blobs:batchRead:\x01*\x12\xc8\x01\n\x07GetTree\x12/.build.bazel.remote.execution.v2.GetTreeRequest\x1a\x30.build.bazel.remote.execution.v2.GetTreeResponse\"X\x82\xd3\xe4\x93\x02R\x12P/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree0\x01\x32\xbd\x01\n\x0c\x43\x61pabilities\x12\xac\x01\n\x0fGetCapabilities\x12\x37.build.bazel.remote.execution.v2.GetCapabilitiesRequest\x1a\x33.build.bazel.remote.execution.v2.ServerCapabilities\"+\x82\xd3\xe4\x93\x02%\x12#/v2/{instance_name=**}/capabilitiesBr\n\x1f\x62uild.bazel.remote.execution.v2B\x14RemoteExecutionProtoP\x01Z\x0fremoteexecution\xa2\x02\x03REX\xaa\x02\x1f\x42uild.Bazel.Remote.Execution.V2b\x06proto3')
+ serialized_pb=_b('\n6build/bazel/remote/execution/v2/remote_execution.proto\x12\x1f\x62uild.bazel.remote.execution.v2\x1a\x1f\x62uild/bazel/semver/semver.proto\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"\xd5\x01\n\x06\x41\x63tion\x12?\n\x0e\x63ommand_digest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x42\n\x11input_root_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12*\n\x07timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x14\n\x0c\x64o_not_cache\x18\x07 \x01(\x08J\x04\x08\x03\x10\x06\"\xb7\x02\n\x07\x43ommand\x12\x11\n\targuments\x18\x01 \x03(\t\x12[\n\x15\x65nvironment_variables\x18\x02 \x03(\x0b\x32<.build.bazel.remote.execution.v2.Command.EnvironmentVariable\x12\x14\n\x0coutput_files\x18\x03 \x03(\t\x12\x1a\n\x12output_directories\x18\x04 \x03(\t\x12;\n\x08platform\x18\x05 \x01(\x0b\x32).build.bazel.remote.execution.v2.Platform\x12\x19\n\x11working_directory\x18\x06 \x01(\t\x1a\x32\n\x13\x45nvironmentVariable\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"{\n\x08Platform\x12\x46\n\nproperties\x18\x01 \x03(\x0b\x32\x32.build.bazel.remote.execution.v2.Platform.Property\x1a\'\n\x08Property\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xca\x01\n\tDirectory\x12\x38\n\x05\x66iles\x18\x01 \x03(\x0b\x32).build.bazel.remote.execution.v2.FileNode\x12\x43\n\x0b\x64irectories\x18\x02 \x03(\x0b\x32..build.bazel.remote.execution.v2.DirectoryNode\x12>\n\x08symlinks\x18\x03 \x03(\x0b\x32,.build.bazel.remote.execution.v2.SymlinkNode\"n\n\x08\x46ileNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x15\n\ris_executable\x18\x04 \x01(\x08J\x04\x08\x03\x10\x04\"V\n\rDirectoryNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"+\n\x0bSymlinkNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\"*\n\x06\x44igest\x12\x0c\n\x04hash\x18\x01 \x01(\t\x12\x12\n\nsize_bytes\x18\x02 \x01(\x03\"\xec\x04\n\x16\x45xecutedActionMetadata\x12\x0e\n\x06worker\x18\x01 \x01(\t\x12\x34\n\x10queued_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12:\n\x16worker_start_timestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12>\n\x1aworker_completed_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1binput_fetch_start_timestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x43\n\x1finput_fetch_completed_timestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12=\n\x19\x65xecution_start_timestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x41\n\x1d\x65xecution_completed_timestamp\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x41\n\x1doutput_upload_start_timestamp\x18\t \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x45\n!output_upload_completed_timestamp\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xd6\x04\n\x0c\x41\x63tionResult\x12\x41\n\x0coutput_files\x18\x02 \x03(\x0b\x32+.build.bazel.remote.execution.v2.OutputFile\x12L\n\x14output_file_symlinks\x18\n \x03(\x0b\x32..build.bazel.remote.execution.v2.OutputSymlink\x12L\n\x12output_directories\x18\x03 \x03(\x0b\x32\x30.build.bazel.remote.execution.v2.OutputDirectory\x12Q\n\x19output_directory_symlinks\x18\x0b 
\x03(\x0b\x32..build.bazel.remote.execution.v2.OutputSymlink\x12\x11\n\texit_code\x18\x04 \x01(\x05\x12\x12\n\nstdout_raw\x18\x05 \x01(\x0c\x12>\n\rstdout_digest\x18\x06 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x12\n\nstderr_raw\x18\x07 \x01(\x0c\x12>\n\rstderr_digest\x18\x08 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12S\n\x12\x65xecution_metadata\x18\t \x01(\x0b\x32\x37.build.bazel.remote.execution.v2.ExecutedActionMetadataJ\x04\x08\x01\x10\x02\"p\n\nOutputFile\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x15\n\ris_executable\x18\x04 \x01(\x08J\x04\x08\x03\x10\x04\"~\n\x04Tree\x12\x38\n\x04root\x18\x01 \x01(\x0b\x32*.build.bazel.remote.execution.v2.Directory\x12<\n\x08\x63hildren\x18\x02 \x03(\x0b\x32*.build.bazel.remote.execution.v2.Directory\"c\n\x0fOutputDirectory\x12\x0c\n\x04path\x18\x01 \x01(\t\x12<\n\x0btree_digest\x18\x03 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.DigestJ\x04\x08\x02\x10\x03\"-\n\rOutputSymlink\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\"#\n\x0f\x45xecutionPolicy\x12\x10\n\x08priority\x18\x01 \x01(\x05\"&\n\x12ResultsCachePolicy\x12\x10\n\x08priority\x18\x01 \x01(\x05\"\xb3\x02\n\x0e\x45xecuteRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x19\n\x11skip_cache_lookup\x18\x03 \x01(\x08\x12>\n\raction_digest\x18\x06 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12J\n\x10\x65xecution_policy\x18\x07 \x01(\x0b\x32\x30.build.bazel.remote.execution.v2.ExecutionPolicy\x12Q\n\x14results_cache_policy\x18\x08 \x01(\x0b\x32\x33.build.bazel.remote.execution.v2.ResultsCachePolicyJ\x04\x08\x02\x10\x03J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06\"Z\n\x07LogFile\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x16\n\x0ehuman_readable\x18\x02 \x01(\x08\"\xd0\x02\n\x0f\x45xecuteResponse\x12=\n\x06result\x18\x01 \x01(\x0b\x32-.build.bazel.remote.execution.v2.ActionResult\x12\x15\n\rcached_result\x18\x02 \x01(\x08\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\x12U\n\x0bserver_logs\x18\x04 \x03(\x0b\x32@.build.bazel.remote.execution.v2.ExecuteResponse.ServerLogsEntry\x12\x0f\n\x07message\x18\x05 \x01(\t\x1a[\n\x0fServerLogsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x37\n\x05value\x18\x02 \x01(\x0b\x32(.build.bazel.remote.execution.v2.LogFile:\x02\x38\x01\"\xb3\x02\n\x18\x45xecuteOperationMetadata\x12N\n\x05stage\x18\x01 \x01(\x0e\x32?.build.bazel.remote.execution.v2.ExecuteOperationMetadata.Stage\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x1a\n\x12stdout_stream_name\x18\x03 \x01(\t\x12\x1a\n\x12stderr_stream_name\x18\x04 \x01(\t\"O\n\x05Stage\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0f\n\x0b\x43\x41\x43HE_CHECK\x10\x01\x12\n\n\x06QUEUED\x10\x02\x12\r\n\tEXECUTING\x10\x03\x12\r\n\tCOMPLETED\x10\x04\"$\n\x14WaitExecutionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"o\n\x16GetActionResultRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x8b\x02\n\x19UpdateActionResultRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x44\n\raction_result\x18\x03 \x01(\x0b\x32-.build.bazel.remote.execution.v2.ActionResult\x12Q\n\x14results_cache_policy\x18\x04 
\x01(\x0b\x32\x33.build.bazel.remote.execution.v2.ResultsCachePolicy\"o\n\x17\x46indMissingBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12=\n\x0c\x62lob_digests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"a\n\x18\x46indMissingBlobsResponse\x12\x45\n\x14missing_blob_digests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xd6\x01\n\x17\x42\x61tchUpdateBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12R\n\x08requests\x18\x02 \x03(\x0b\x32@.build.bazel.remote.execution.v2.BatchUpdateBlobsRequest.Request\x1aP\n\x07Request\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\xda\x01\n\x18\x42\x61tchUpdateBlobsResponse\x12U\n\tresponses\x18\x01 \x03(\x0b\x32\x42.build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response\x1ag\n\x08Response\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"h\n\x15\x42\x61tchReadBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x38\n\x07\x64igests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xe4\x01\n\x16\x42\x61tchReadBlobsResponse\x12S\n\tresponses\x18\x01 \x03(\x0b\x32@.build.bazel.remote.execution.v2.BatchReadBlobsResponse.Response\x1au\n\x08Response\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\"\x8c\x01\n\x0eGetTreeRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12<\n\x0broot_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t\"k\n\x0fGetTreeResponse\x12?\n\x0b\x64irectories\x18\x01 \x03(\x0b\x32*.build.bazel.remote.execution.v2.Directory\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"/\n\x16GetCapabilitiesRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\"\xe3\x02\n\x12ServerCapabilities\x12N\n\x12\x63\x61\x63he_capabilities\x18\x01 \x01(\x0b\x32\x32.build.bazel.remote.execution.v2.CacheCapabilities\x12V\n\x16\x65xecution_capabilities\x18\x02 \x01(\x0b\x32\x36.build.bazel.remote.execution.v2.ExecutionCapabilities\x12:\n\x16\x64\x65precated_api_version\x18\x03 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\x12\x33\n\x0flow_api_version\x18\x04 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\x12\x34\n\x10high_api_version\x18\x05 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\"7\n\x1d\x41\x63tionCacheUpdateCapabilities\x12\x16\n\x0eupdate_enabled\x18\x01 \x01(\x08\"\xac\x01\n\x14PriorityCapabilities\x12W\n\npriorities\x18\x01 \x03(\x0b\x32\x43.build.bazel.remote.execution.v2.PriorityCapabilities.PriorityRange\x1a;\n\rPriorityRange\x12\x14\n\x0cmin_priority\x18\x01 \x01(\x05\x12\x14\n\x0cmax_priority\x18\x02 \x01(\x05\"\x88\x04\n\x11\x43\x61\x63heCapabilities\x12H\n\x0f\x64igest_function\x18\x01 \x03(\x0e\x32/.build.bazel.remote.execution.v2.DigestFunction\x12h\n action_cache_update_capabilities\x18\x02 \x01(\x0b\x32>.build.bazel.remote.execution.v2.ActionCacheUpdateCapabilities\x12Z\n\x1b\x63\x61\x63he_priority_capabilities\x18\x03 \x01(\x0b\x32\x35.build.bazel.remote.execution.v2.PriorityCapabilities\x12\"\n\x1amax_batch_total_size_bytes\x18\x04 \x01(\x03\x12v\n\x1esymlink_absolute_path_strategy\x18\x05 
\x01(\x0e\x32N.build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy\"G\n\x1bSymlinkAbsolutePathStrategy\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0e\n\nDISALLOWED\x10\x01\x12\x0b\n\x07\x41LLOWED\x10\x02\"\xd7\x01\n\x15\x45xecutionCapabilities\x12H\n\x0f\x64igest_function\x18\x01 \x01(\x0e\x32/.build.bazel.remote.execution.v2.DigestFunction\x12\x14\n\x0c\x65xec_enabled\x18\x02 \x01(\x08\x12^\n\x1f\x65xecution_priority_capabilities\x18\x03 \x01(\x0b\x32\x35.build.bazel.remote.execution.v2.PriorityCapabilities\"6\n\x0bToolDetails\x12\x11\n\ttool_name\x18\x01 \x01(\t\x12\x14\n\x0ctool_version\x18\x02 \x01(\t\"\xa7\x01\n\x0fRequestMetadata\x12\x42\n\x0ctool_details\x18\x01 \x01(\x0b\x32,.build.bazel.remote.execution.v2.ToolDetails\x12\x11\n\taction_id\x18\x02 \x01(\t\x12\x1a\n\x12tool_invocation_id\x18\x03 \x01(\t\x12!\n\x19\x63orrelated_invocations_id\x18\x04 \x01(\t*<\n\x0e\x44igestFunction\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06SHA256\x10\x01\x12\x08\n\x04SHA1\x10\x02\x12\x07\n\x03MD5\x10\x03\x32\xb9\x02\n\tExecution\x12\x8e\x01\n\x07\x45xecute\x12/.build.bazel.remote.execution.v2.ExecuteRequest\x1a\x1d.google.longrunning.Operation\"1\x82\xd3\xe4\x93\x02+\"&/v2/{instance_name=**}/actions:execute:\x01*0\x01\x12\x9a\x01\n\rWaitExecution\x12\x35.build.bazel.remote.execution.v2.WaitExecutionRequest\x1a\x1d.google.longrunning.Operation\"1\x82\xd3\xe4\x93\x02+\"&/v2/{name=operations/**}:waitExecution:\x01*0\x01\x32\xd6\x03\n\x0b\x41\x63tionCache\x12\xd7\x01\n\x0fGetActionResult\x12\x37.build.bazel.remote.execution.v2.GetActionResultRequest\x1a-.build.bazel.remote.execution.v2.ActionResult\"\\\x82\xd3\xe4\x93\x02V\x12T/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}\x12\xec\x01\n\x12UpdateActionResult\x12:.build.bazel.remote.execution.v2.UpdateActionResultRequest\x1a-.build.bazel.remote.execution.v2.ActionResult\"k\x82\xd3\xe4\x93\x02\x65\x1aT/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}:\raction_result2\x9b\x06\n\x19\x43ontentAddressableStorage\x12\xbc\x01\n\x10\x46indMissingBlobs\x12\x38.build.bazel.remote.execution.v2.FindMissingBlobsRequest\x1a\x39.build.bazel.remote.execution.v2.FindMissingBlobsResponse\"3\x82\xd3\xe4\x93\x02-\"(/v2/{instance_name=**}/blobs:findMissing:\x01*\x12\xbc\x01\n\x10\x42\x61tchUpdateBlobs\x12\x38.build.bazel.remote.execution.v2.BatchUpdateBlobsRequest\x1a\x39.build.bazel.remote.execution.v2.BatchUpdateBlobsResponse\"3\x82\xd3\xe4\x93\x02-\"(/v2/{instance_name=**}/blobs:batchUpdate:\x01*\x12\xb4\x01\n\x0e\x42\x61tchReadBlobs\x12\x36.build.bazel.remote.execution.v2.BatchReadBlobsRequest\x1a\x37.build.bazel.remote.execution.v2.BatchReadBlobsResponse\"1\x82\xd3\xe4\x93\x02+\"&/v2/{instance_name=**}/blobs:batchRead:\x01*\x12\xc8\x01\n\x07GetTree\x12/.build.bazel.remote.execution.v2.GetTreeRequest\x1a\x30.build.bazel.remote.execution.v2.GetTreeResponse\"X\x82\xd3\xe4\x93\x02R\x12P/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree0\x01\x32\xbd\x01\n\x0c\x43\x61pabilities\x12\xac\x01\n\x0fGetCapabilities\x12\x37.build.bazel.remote.execution.v2.GetCapabilitiesRequest\x1a\x33.build.bazel.remote.execution.v2.ServerCapabilities\"+\x82\xd3\xe4\x93\x02%\x12#/v2/{instance_name=**}/capabilitiesBr\n\x1f\x62uild.bazel.remote.execution.v2B\x14RemoteExecutionProtoP\x01Z\x0fremoteexecution\xa2\x02\x03REX\xaa\x02\x1f\x42uild.Bazel.Remote.Execution.V2b\x06proto3')
   ,
   dependencies=[build_dot_bazel_dot_semver_dot_semver__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,])
 
@@ -55,8 +55,8 @@ _DIGESTFUNCTION = _descriptor.EnumDescriptor(
   ],
   containing_type=None,
   serialized_options=None,
-  serialized_start=7421,
-  serialized_end=7481,
+  serialized_start=7438,
+  serialized_end=7498,
 )
 _sym_db.RegisterEnumDescriptor(_DIGESTFUNCTION)
 
@@ -96,8 +96,8 @@ _EXECUTEOPERATIONMETADATA_STAGE = _descriptor.EnumDescriptor(
   ],
   containing_type=None,
   serialized_options=None,
-  serialized_start=4074,
-  serialized_end=4153,
+  serialized_start=4091,
+  serialized_end=4170,
 )
 _sym_db.RegisterEnumDescriptor(_EXECUTEOPERATIONMETADATA_STAGE)
 
@@ -122,8 +122,8 @@ _CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY = _descriptor.EnumDescriptor(
   ],
   containing_type=None,
   serialized_options=None,
-  serialized_start=6904,
-  serialized_end=6975,
+  serialized_start=6921,
+  serialized_end=6992,
 )
 _sym_db.RegisterEnumDescriptor(_CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY)
 
@@ -1094,8 +1094,8 @@ _EXECUTERESPONSE_SERVERLOGSENTRY = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=3752,
-  serialized_end=3843,
+  serialized_start=3769,
+  serialized_end=3860,
 )
 
 _EXECUTERESPONSE = _descriptor.Descriptor(
@@ -1133,6 +1133,13 @@ _EXECUTERESPONSE = _descriptor.Descriptor(
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
       serialized_options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='message', full_name='build.bazel.remote.execution.v2.ExecuteResponse.message', index=4,
+      number=5, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR),
   ],
   extensions=[
   ],
@@ -1146,7 +1153,7 @@ _EXECUTERESPONSE = _descriptor.Descriptor(
   oneofs=[
   ],
   serialized_start=3524,
-  serialized_end=3843,
+  serialized_end=3860,
 )
 
 
@@ -1198,8 +1205,8 @@ _EXECUTEOPERATIONMETADATA = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=3846,
-  serialized_end=4153,
+  serialized_start=3863,
+  serialized_end=4170,
 )
 
 
@@ -1229,8 +1236,8 @@ _WAITEXECUTIONREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=4155,
-  serialized_end=4191,
+  serialized_start=4172,
+  serialized_end=4208,
 )
 
 
@@ -1267,8 +1274,8 @@ _GETACTIONRESULTREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=4193,
-  serialized_end=4304,
+  serialized_start=4210,
+  serialized_end=4321,
 )
 
 
@@ -1319,8 +1326,8 @@ _UPDATEACTIONRESULTREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=4307,
-  serialized_end=4574,
+  serialized_start=4324,
+  serialized_end=4591,
 )
 
 
@@ -1357,8 +1364,8 @@ _FINDMISSINGBLOBSREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=4576,
-  serialized_end=4687,
+  serialized_start=4593,
+  serialized_end=4704,
 )
 
 
@@ -1388,8 +1395,8 @@ _FINDMISSINGBLOBSRESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=4689,
-  serialized_end=4786,
+  serialized_start=4706,
+  serialized_end=4803,
 )
 
 
@@ -1426,8 +1433,8 @@ _BATCHUPDATEBLOBSREQUEST_REQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=4923,
-  serialized_end=5003,
+  serialized_start=4940,
+  serialized_end=5020,
 )
 
 _BATCHUPDATEBLOBSREQUEST = _descriptor.Descriptor(
@@ -1463,8 +1470,8 @@ _BATCHUPDATEBLOBSREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=4789,
-  serialized_end=5003,
+  serialized_start=4806,
+  serialized_end=5020,
 )
 
 
@@ -1501,8 +1508,8 @@ _BATCHUPDATEBLOBSRESPONSE_RESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=5121,
-  serialized_end=5224,
+  serialized_start=5138,
+  serialized_end=5241,
 )
 
 _BATCHUPDATEBLOBSRESPONSE = _descriptor.Descriptor(
@@ -1531,8 +1538,8 @@ _BATCHUPDATEBLOBSRESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=5006,
-  serialized_end=5224,
+  serialized_start=5023,
+  serialized_end=5241,
 )
 
 
@@ -1569,8 +1576,8 @@ _BATCHREADBLOBSREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=5226,
-  serialized_end=5330,
+  serialized_start=5243,
+  serialized_end=5347,
 )
 
 
@@ -1614,8 +1621,8 @@ _BATCHREADBLOBSRESPONSE_RESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=5444,
-  serialized_end=5561,
+  serialized_start=5461,
+  serialized_end=5578,
 )
 
 _BATCHREADBLOBSRESPONSE = _descriptor.Descriptor(
@@ -1644,8 +1651,8 @@ _BATCHREADBLOBSRESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=5333,
-  serialized_end=5561,
+  serialized_start=5350,
+  serialized_end=5578,
 )
 
 
@@ -1696,8 +1703,8 @@ _GETTREEREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=5564,
-  serialized_end=5704,
+  serialized_start=5581,
+  serialized_end=5721,
 )
 
 
@@ -1734,8 +1741,8 @@ _GETTREERESPONSE = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=5706,
-  serialized_end=5813,
+  serialized_start=5723,
+  serialized_end=5830,
 )
 
 
@@ -1765,8 +1772,8 @@ _GETCAPABILITIESREQUEST = _descriptor.Descriptor(
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=5815,
-  serialized_end=5862,
+  serialized_start=5832,
+  serialized_end=5879,
 )
 
 
... | ... | @@ -1824,8 +1831,8 @@ _SERVERCAPABILITIES = _descriptor.Descriptor( |
1824 | 1831 |
extension_ranges=[],
|
1825 | 1832 |
oneofs=[
|
1826 | 1833 |
],
|
1827 |
- serialized_start=5865,
|
|
1828 |
- serialized_end=6220,
|
|
1834 |
+ serialized_start=5882,
|
|
1835 |
+ serialized_end=6237,
|
|
1829 | 1836 |
)
|
1830 | 1837 |
|
1831 | 1838 |
|
... | ... | @@ -1855,8 +1862,8 @@ _ACTIONCACHEUPDATECAPABILITIES = _descriptor.Descriptor( |
1855 | 1862 |
extension_ranges=[],
|
1856 | 1863 |
oneofs=[
|
1857 | 1864 |
],
|
1858 |
- serialized_start=6222,
|
|
1859 |
- serialized_end=6277,
|
|
1865 |
+ serialized_start=6239,
|
|
1866 |
+ serialized_end=6294,
|
|
1860 | 1867 |
)
|
1861 | 1868 |
|
1862 | 1869 |
|
... | ... | @@ -1893,8 +1900,8 @@ _PRIORITYCAPABILITIES_PRIORITYRANGE = _descriptor.Descriptor( |
1893 | 1900 |
extension_ranges=[],
|
1894 | 1901 |
oneofs=[
|
1895 | 1902 |
],
|
1896 |
- serialized_start=6393,
|
|
1897 |
- serialized_end=6452,
|
|
1903 |
+ serialized_start=6410,
|
|
1904 |
+ serialized_end=6469,
|
|
1898 | 1905 |
)
|
1899 | 1906 |
|
1900 | 1907 |
_PRIORITYCAPABILITIES = _descriptor.Descriptor(
|
... | ... | @@ -1923,8 +1930,8 @@ _PRIORITYCAPABILITIES = _descriptor.Descriptor( |
1923 | 1930 |
extension_ranges=[],
|
1924 | 1931 |
oneofs=[
|
1925 | 1932 |
],
|
1926 |
- serialized_start=6280,
|
|
1927 |
- serialized_end=6452,
|
|
1933 |
+ serialized_start=6297,
|
|
1934 |
+ serialized_end=6469,
|
|
1928 | 1935 |
)
|
1929 | 1936 |
|
1930 | 1937 |
|
... | ... | @@ -1983,8 +1990,8 @@ _CACHECAPABILITIES = _descriptor.Descriptor( |
1983 | 1990 |
extension_ranges=[],
|
1984 | 1991 |
oneofs=[
|
1985 | 1992 |
],
|
1986 |
- serialized_start=6455,
|
|
1987 |
- serialized_end=6975,
|
|
1993 |
+ serialized_start=6472,
|
|
1994 |
+ serialized_end=6992,
|
|
1988 | 1995 |
)
|
1989 | 1996 |
|
1990 | 1997 |
|
... | ... | @@ -2028,8 +2035,8 @@ _EXECUTIONCAPABILITIES = _descriptor.Descriptor( |
2028 | 2035 |
extension_ranges=[],
|
2029 | 2036 |
oneofs=[
|
2030 | 2037 |
],
|
2031 |
- serialized_start=6978,
|
|
2032 |
- serialized_end=7193,
|
|
2038 |
+ serialized_start=6995,
|
|
2039 |
+ serialized_end=7210,
|
|
2033 | 2040 |
)
|
2034 | 2041 |
|
2035 | 2042 |
|
... | ... | @@ -2066,8 +2073,8 @@ _TOOLDETAILS = _descriptor.Descriptor( |
2066 | 2073 |
extension_ranges=[],
|
2067 | 2074 |
oneofs=[
|
2068 | 2075 |
],
|
2069 |
- serialized_start=7195,
|
|
2070 |
- serialized_end=7249,
|
|
2076 |
+ serialized_start=7212,
|
|
2077 |
+ serialized_end=7266,
|
|
2071 | 2078 |
)
|
2072 | 2079 |
|
2073 | 2080 |
|
... | ... | @@ -2118,8 +2125,8 @@ _REQUESTMETADATA = _descriptor.Descriptor( |
2118 | 2125 |
extension_ranges=[],
|
2119 | 2126 |
oneofs=[
|
2120 | 2127 |
],
|
2121 |
- serialized_start=7252,
|
|
2122 |
- serialized_end=7419,
|
|
2128 |
+ serialized_start=7269,
|
|
2129 |
+ serialized_end=7436,
|
|
2123 | 2130 |
)
|
2124 | 2131 |
|
2125 | 2132 |
_ACTION.fields_by_name['command_digest'].message_type = _DIGEST
|
... | ... | @@ -2583,8 +2590,8 @@ _EXECUTION = _descriptor.ServiceDescriptor( |
2583 | 2590 |
file=DESCRIPTOR,
|
2584 | 2591 |
index=0,
|
2585 | 2592 |
serialized_options=None,
|
2586 |
- serialized_start=7484,
|
|
2587 |
- serialized_end=7797,
|
|
2593 |
+ serialized_start=7501,
|
|
2594 |
+ serialized_end=7814,
|
|
2588 | 2595 |
methods=[
|
2589 | 2596 |
_descriptor.MethodDescriptor(
|
2590 | 2597 |
name='Execute',
|
... | ... | @@ -2616,8 +2623,8 @@ _ACTIONCACHE = _descriptor.ServiceDescriptor( |
2616 | 2623 |
file=DESCRIPTOR,
|
2617 | 2624 |
index=1,
|
2618 | 2625 |
serialized_options=None,
|
2619 |
- serialized_start=7800,
|
|
2620 |
- serialized_end=8270,
|
|
2626 |
+ serialized_start=7817,
|
|
2627 |
+ serialized_end=8287,
|
|
2621 | 2628 |
methods=[
|
2622 | 2629 |
_descriptor.MethodDescriptor(
|
2623 | 2630 |
name='GetActionResult',
|
... | ... | @@ -2649,8 +2656,8 @@ _CONTENTADDRESSABLESTORAGE = _descriptor.ServiceDescriptor( |
2649 | 2656 |
file=DESCRIPTOR,
|
2650 | 2657 |
index=2,
|
2651 | 2658 |
serialized_options=None,
|
2652 |
- serialized_start=8273,
|
|
2653 |
- serialized_end=9068,
|
|
2659 |
+ serialized_start=8290,
|
|
2660 |
+ serialized_end=9085,
|
|
2654 | 2661 |
methods=[
|
2655 | 2662 |
_descriptor.MethodDescriptor(
|
2656 | 2663 |
name='FindMissingBlobs',
|
... | ... | @@ -2700,8 +2707,8 @@ _CAPABILITIES = _descriptor.ServiceDescriptor( |
2700 | 2707 |
file=DESCRIPTOR,
|
2701 | 2708 |
index=3,
|
2702 | 2709 |
serialized_options=None,
|
2703 |
- serialized_start=9071,
|
|
2704 |
- serialized_end=9260,
|
|
2710 |
+ serialized_start=9088,
|
|
2711 |
+ serialized_end=9277,
|
|
2705 | 2712 |
methods=[
|
2706 | 2713 |
_descriptor.MethodDescriptor(
|
2707 | 2714 |
name='GetCapabilities',
|
... | ... | @@ -87,6 +87,7 @@ class ExecutionServicer(object): |
87 | 87 |
action will be reported in the `status` field of the `ExecuteResponse`. The
|
88 | 88 |
server MUST NOT set the `error` field of the `Operation` proto.
|
89 | 89 |
The possible errors include:
|
90 |
+ |
|
90 | 91 |
* `INVALID_ARGUMENT`: One or more arguments are invalid.
|
91 | 92 |
* `FAILED_PRECONDITION`: One or more errors occurred in setting up the
|
92 | 93 |
action requested, such as a missing input or command or no worker being
|
... | ... | @@ -210,6 +211,7 @@ class ActionCacheServicer(object): |
210 | 211 |
"""Retrieve a cached execution result.
|
211 | 212 |
|
212 | 213 |
Errors:
|
214 |
+ |
|
213 | 215 |
* `NOT_FOUND`: The requested `ActionResult` is not in the cache.
|
214 | 216 |
"""
|
215 | 217 |
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
... | ... | @@ -219,11 +221,6 @@ class ActionCacheServicer(object): |
219 | 221 |
def UpdateActionResult(self, request, context):
|
220 | 222 |
"""Upload a new execution result.
|
221 | 223 |
|
222 |
- This method is intended for servers which implement the distributed cache
|
|
223 |
- independently of the
|
|
224 |
- [Execution][build.bazel.remote.execution.v2.Execution] API. As a
|
|
225 |
- result, it is OPTIONAL for servers to implement.
|
|
226 |
- |
|
227 | 224 |
In order to allow the server to perform access control based on the type of
|
228 | 225 |
action, and to assist with client debugging, the client MUST first upload
|
229 | 226 |
the [Action][build.bazel.remote.execution.v2.Execution] that produced the
|
... | ... | @@ -232,7 +229,10 @@ class ActionCacheServicer(object): |
232 | 229 |
`ContentAddressableStorage`.
|
233 | 230 |
|
234 | 231 |
Errors:
|
235 |
- * `UNIMPLEMENTED`: This method is not supported by the server.
|
|
232 |
+ |
|
233 |
+ * `INVALID_ARGUMENT`: One or more arguments are invalid.
|
|
234 |
+ * `FAILED_PRECONDITION`: One or more errors occurred in updating the
|
|
235 |
+ action result, such as a missing command or action.
|
|
236 | 236 |
* `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
|
237 | 237 |
entry to the cache.
|
238 | 238 |
"""
|
... | ... | @@ -299,6 +299,9 @@ class ContentAddressableStorageStub(object): |
299 | 299 |
by the server. For servers which do not support multiple instances, then the
|
300 | 300 |
`instance_name` is the empty path and the leading slash is omitted, so that
|
301 | 301 |
the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
|
302 |
+ To simplify parsing, a path segment cannot equal any of the following
|
|
303 |
+ keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
|
|
304 |
+ `capabilities`.
|
|
302 | 305 |
|
303 | 306 |
When attempting an upload, if another client has already completed the upload
|
304 | 307 |
(which may occur in the middle of a single upload if another client uploads
|
... | ... | @@ -395,6 +398,9 @@ class ContentAddressableStorageServicer(object): |
395 | 398 |
by the server. For servers which do not support multiple instances, then the
|
396 | 399 |
`instance_name` is the empty path and the leading slash is omitted, so that
|
397 | 400 |
the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
|
401 |
+ To simplify parsing, a path segment cannot equal any of the following
|
|
402 |
+ keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations` and
|
|
403 |
+ `capabilities`.
|
|
398 | 404 |
|
399 | 405 |
When attempting an upload, if another client has already completed the upload
|
400 | 406 |
(which may occur in the middle of a single upload if another client uploads
|
... | ... | @@ -450,10 +456,12 @@ class ContentAddressableStorageServicer(object): |
450 | 456 |
independently.
|
451 | 457 |
|
452 | 458 |
Errors:
|
459 |
+ |
|
453 | 460 |
* `INVALID_ARGUMENT`: The client attempted to upload more than the
|
454 | 461 |
server supported limit.
|
455 | 462 |
|
456 | 463 |
Individual requests may return the following errors, additionally:
|
464 |
+ |
|
457 | 465 |
* `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
|
458 | 466 |
* `INVALID_ARGUMENT`: The
|
459 | 467 |
[Digest][build.bazel.remote.execution.v2.Digest] does not match the
|
... | ... | @@ -478,6 +486,7 @@ class ContentAddressableStorageServicer(object): |
478 | 486 |
independently.
|
479 | 487 |
|
480 | 488 |
Errors:
|
489 |
+ |
|
481 | 490 |
* `INVALID_ARGUMENT`: The client attempted to read more than the
|
482 | 491 |
server supported limit.
|
483 | 492 |
|
... | ... | @@ -23,9 +23,19 @@ option java_outer_classname = "SemverProto"; |
23 | 23 |
option java_package = "build.bazel.semver";
|
24 | 24 |
option objc_class_prefix = "SMV";
|
25 | 25 |
|
26 |
+// The full version of a given tool.
|
|
26 | 27 |
message SemVer {
|
28 |
+ // The major version, e.g. 10 for 10.2.3.
|
|
27 | 29 |
int32 major = 1;
|
30 |
+ |
|
31 |
+ // The minor version, e.g. 2 for 10.2.3.
|
|
28 | 32 |
int32 minor = 2;
|
33 |
+ |
|
34 |
+ // The patch version, e.g. 3 for 10.2.3.
|
|
29 | 35 |
int32 patch = 3;
|
36 |
+ |
|
37 |
+ // The pre-release version. Either this field or major/minor/patch fields
|
|
38 |
+ // must be filled. They are mutually exclusive. Pre-release versions are
|
|
39 |
+ // assumed to be earlier than any released versions.
|
|
30 | 40 |
string prerelease = 4;
|
31 | 41 |
}
|
... | ... | @@ -24,11 +24,10 @@ from buildgrid._exceptions import NotFoundError |
24 | 24 |
from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
|
25 | 25 |
from buildgrid._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc
|
26 | 26 |
from buildgrid._protos.google.rpc import code_pb2
|
27 |
-from buildgrid.settings import HASH, MAX_REQUEST_SIZE, MAX_REQUEST_COUNT
|
|
27 |
+from buildgrid.client.capabilities import CapabilitiesInterface
|
|
28 |
+from buildgrid.settings import HASH, MAX_REQUEST_SIZE, MAX_REQUEST_COUNT, BATCH_REQUEST_SIZE_THRESHOLD
|
|
28 | 29 |
from buildgrid.utils import create_digest, merkle_tree_maker
|
29 | 30 |
|
30 |
-# Maximum size for a queueable file:
|
|
31 |
-FILE_SIZE_THRESHOLD = 1 * 1024 * 1024
|
|
32 | 31 |
|
33 | 32 |
_FileRequest = namedtuple('FileRequest', ['digest', 'output_paths'])
|
34 | 33 |
|
... | ... | @@ -50,6 +49,67 @@ class _CallCache: |
50 | 49 |
return name in cls.__calls[channel]
|
51 | 50 |
|
52 | 51 |
|
52 |
+class _CasBatchRequestSizesCache:
|
|
53 |
+ """Cache that stores, for each remote, the limit of bytes that can
|
|
54 |
+ be transferred using batches as well as a threshold that determines
|
|
55 |
+ when a file can be fetched as part of a batch request.
|
|
56 |
+ """
|
|
57 |
+ __cas_max_batch_transfer_size = {}
|
|
58 |
+ __cas_batch_request_size_threshold = {}
|
|
59 |
+ |
|
60 |
+ @classmethod
|
|
61 |
+ def max_effective_batch_size_bytes(cls, channel, instance_name):
|
|
62 |
+ """Returns the maximum number of bytes that can be transferred
|
|
63 |
+ using batch methods for the given remote.
|
|
64 |
+ """
|
|
65 |
+ if channel not in cls.__cas_max_batch_transfer_size:
|
|
66 |
+ cls.__cas_max_batch_transfer_size[channel] = dict()
|
|
67 |
+ |
|
68 |
+ if instance_name not in cls.__cas_max_batch_transfer_size[channel]:
|
|
69 |
+ max_batch_size = cls._get_server_max_batch_total_size_bytes(channel,
|
|
70 |
+ instance_name)
|
|
71 |
+ |
|
72 |
+ cls.__cas_max_batch_transfer_size[channel][instance_name] = max_batch_size
|
|
73 |
+ |
|
74 |
+ return cls.__cas_max_batch_transfer_size[channel][instance_name]
|
|
75 |
+ |
|
76 |
+ @classmethod
|
|
77 |
+ def batch_request_size_threshold(cls, channel, instance_name):
|
|
78 |
+ if channel not in cls.__cas_batch_request_size_threshold:
|
|
79 |
+ cls.__cas_batch_request_size_threshold[channel] = dict()
|
|
80 |
+ |
|
81 |
+ if instance_name not in cls.__cas_batch_request_size_threshold[channel]:
|
|
82 |
+ # Computing the threshold:
|
|
83 |
+ max_batch_size = cls.max_effective_batch_size_bytes(channel,
|
|
84 |
+ instance_name)
|
|
85 |
+ threshold = BATCH_REQUEST_SIZE_THRESHOLD * max_batch_size
|
|
86 |
+ |
|
87 |
+ cls.__cas_batch_request_size_threshold[channel][instance_name] = threshold
|
|
88 |
+ |
|
89 |
+ return cls.__cas_batch_request_size_threshold[channel][instance_name]
|
|
90 |
+ |
|
91 |
+ @classmethod
|
|
92 |
+ def _get_server_max_batch_total_size_bytes(cls, channel, instance_name):
|
|
93 |
+ """Returns the maximum number of bytes that can be effectively
|
|
94 |
+ transferred using batches, considering the limits imposed by
|
|
95 |
+ the server's configuration and by gRPC.
|
|
96 |
+ """
|
|
97 |
+ try:
|
|
98 |
+ capabilities_interface = CapabilitiesInterface(channel)
|
|
99 |
+ server_capabilities = capabilities_interface.get_capabilities(instance_name)
|
|
100 |
+ |
|
101 |
+ cache_capabilities = server_capabilities.cache_capabilities
|
|
102 |
+ |
|
103 |
+ max_batch_total_size = cache_capabilities.max_batch_total_size_bytes
|
|
104 |
+ # The server could set this value to 0 (no limit set).
|
|
105 |
+ if max_batch_total_size:
|
|
106 |
+ return min(max_batch_total_size, MAX_REQUEST_SIZE)
|
|
107 |
+ except Exception:
|
|
108 |
+ pass
|
|
109 |
+ |
|
110 |
+ return MAX_REQUEST_SIZE
|
|
111 |
+ |
|
112 |
+ |
|
53 | 113 |
@contextmanager
|
54 | 114 |
def download(channel, instance=None, u_uid=None):
|
55 | 115 |
"""Context manager generator for the :class:`Downloader` class."""
|
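Note: the cache above negotiates batch limits lazily, once per (channel, instance) pair. A minimal sketch of how client code can consult it, assuming a gRPC `channel` and an `instance_name` are already at hand:

    from buildgrid.settings import MAX_REQUEST_SIZE, BATCH_REQUEST_SIZE_THRESHOLD

    # First call per remote issues a GetCapabilities request, then memoizes:
    max_batch_size = _CasBatchRequestSizesCache.max_effective_batch_size_bytes(
        channel, instance_name)
    assert max_batch_size <= MAX_REQUEST_SIZE  # capped by the gRPC message limit

    # Queueing threshold, derived once from the negotiated limit:
    threshold = _CasBatchRequestSizesCache.batch_request_size_threshold(
        channel, instance_name)
    assert threshold == BATCH_REQUEST_SIZE_THRESHOLD * max_batch_size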
... | ... | @@ -189,7 +249,7 @@ class Downloader: |
189 | 249 |
if not os.path.isabs(file_path):
|
190 | 250 |
file_path = os.path.abspath(file_path)
|
191 | 251 |
|
192 |
- if not queue or digest.size_bytes > FILE_SIZE_THRESHOLD:
|
|
252 |
+ if not queue or digest.size_bytes > self._queueable_file_size_threshold():
|
|
193 | 253 |
self._fetch_file(digest, file_path, is_executable=is_executable)
|
194 | 254 |
else:
|
195 | 255 |
self._queue_file(digest, file_path, is_executable=is_executable)
|
... | ... | @@ -334,9 +394,11 @@ class Downloader: |
334 | 394 |
|
335 | 395 |
def _queue_file(self, digest, file_path, is_executable=False):
|
336 | 396 |
"""Queues a file for later batch download"""
|
337 |
- if self.__file_request_size + digest.ByteSize() > MAX_REQUEST_SIZE:
|
|
397 |
+ batch_size_limit = self._max_effective_batch_size_bytes()
|
|
398 |
+ |
|
399 |
+ if self.__file_request_size + digest.ByteSize() > batch_size_limit:
|
|
338 | 400 |
self.flush()
|
339 |
- elif self.__file_response_size + digest.size_bytes > MAX_REQUEST_SIZE:
|
|
401 |
+ elif self.__file_response_size + digest.size_bytes > batch_size_limit:
|
|
340 | 402 |
self.flush()
|
341 | 403 |
elif self.__file_request_count >= MAX_REQUEST_COUNT:
|
342 | 404 |
self.flush()
|
... | ... | @@ -498,6 +560,20 @@ class Downloader: |
498 | 560 |
|
499 | 561 |
os.symlink(symlink_path, target_path)
|
500 | 562 |
|
563 |
+ def _max_effective_batch_size_bytes(self):
|
|
564 |
+ """Returns the effective maximum number of bytes that can be
|
|
565 |
+ transferred using batches, considering gRPC maximum message size.
|
|
566 |
+ """
|
|
567 |
+ return _CasBatchRequestSizesCache.max_effective_batch_size_bytes(self.channel,
|
|
568 |
+ self.instance_name)
|
|
569 |
+ |
|
570 |
+ def _queueable_file_size_threshold(self):
|
|
571 |
+ """Returns the size limit up until which files can be queued to
|
|
572 |
+ be requested in a batch.
|
|
573 |
+ """
|
|
574 |
+ return _CasBatchRequestSizesCache.batch_request_size_threshold(self.channel,
|
|
575 |
+ self.instance_name)
|
|
576 |
+ |
|
501 | 577 |
|
502 | 578 |
@contextmanager
|
503 | 579 |
def upload(channel, instance=None, u_uid=None):
|
... | ... | @@ -563,7 +639,8 @@ class Uploader: |
563 | 639 |
Returns:
|
564 | 640 |
:obj:`Digest`: the sent blob's digest.
|
565 | 641 |
"""
|
566 |
- if not queue or len(blob) > FILE_SIZE_THRESHOLD:
|
|
642 |
+ |
|
643 |
+ if not queue or len(blob) > self._queueable_file_size_threshold():
|
|
567 | 644 |
blob_digest = self._send_blob(blob, digest=digest)
|
568 | 645 |
else:
|
569 | 646 |
blob_digest = self._queue_blob(blob, digest=digest)
|
... | ... | @@ -589,7 +666,7 @@ class Uploader: |
589 | 666 |
"""
|
590 | 667 |
message_blob = message.SerializeToString()
|
591 | 668 |
|
592 |
- if not queue or len(message_blob) > FILE_SIZE_THRESHOLD:
|
|
669 |
+ if not queue or len(message_blob) > self._queueable_file_size_threshold():
|
|
593 | 670 |
message_digest = self._send_blob(message_blob, digest=digest)
|
594 | 671 |
else:
|
595 | 672 |
message_digest = self._queue_blob(message_blob, digest=digest)
|
... | ... | @@ -622,7 +699,7 @@ class Uploader: |
622 | 699 |
with open(file_path, 'rb') as bytes_steam:
|
623 | 700 |
file_bytes = bytes_steam.read()
|
624 | 701 |
|
625 |
- if not queue or len(file_bytes) > FILE_SIZE_THRESHOLD:
|
|
702 |
+ if not queue or len(file_bytes) > self._queueable_file_size_threshold():
|
|
626 | 703 |
file_digest = self._send_blob(file_bytes)
|
627 | 704 |
else:
|
628 | 705 |
file_digest = self._queue_blob(file_bytes)
|
... | ... | @@ -795,7 +872,12 @@ class Uploader: |
795 | 872 |
blob_digest.hash = HASH(blob).hexdigest()
|
796 | 873 |
blob_digest.size_bytes = len(blob)
|
797 | 874 |
|
798 |
- if self.__request_size + blob_digest.size_bytes > MAX_REQUEST_SIZE:
|
|
875 |
+ # If we are here queueing a file, we know that its size is
|
|
876 |
+ # smaller than gRPC's message size limit.
|
|
877 |
+ # We'll make a single batch request as big as the server allows.
|
|
878 |
+ batch_size_limit = self._max_effective_batch_size_bytes()
|
|
879 |
+ |
|
880 |
+ if self.__request_size + blob_digest.size_bytes > batch_size_limit:
|
|
799 | 881 |
self.flush()
|
800 | 882 |
elif self.__request_count >= MAX_REQUEST_COUNT:
|
801 | 883 |
self.flush()
|
... | ... | @@ -851,3 +933,17 @@ class Uploader: |
851 | 933 |
written_digests.append(self._send_blob(blob, digest=digest))
|
852 | 934 |
|
853 | 935 |
return written_digests
|
936 |
+ |
|
937 |
+ def _max_effective_batch_size_bytes(self):
|
|
938 |
+ """Returns the effective maximum number of bytes that can be
|
|
939 |
+ transferred using batches, considering gRPC maximum message size.
|
|
940 |
+ """
|
|
941 |
+ return _CasBatchRequestSizesCache.max_effective_batch_size_bytes(self.channel,
|
|
942 |
+ self.instance_name)
|
|
943 |
+ |
|
944 |
+ def _queueable_file_size_threshold(self):
|
|
945 |
+ """Returns the size limit up until which files can be queued to
|
|
946 |
+ be requested in a batch.
|
|
947 |
+ """
|
|
948 |
+ return _CasBatchRequestSizesCache.batch_request_size_threshold(self.channel,
|
|
949 |
+ self.instance_name)
|
... | ... | @@ -40,12 +40,25 @@ class ActionCache(ReferenceCache): |
40 | 40 |
|
41 | 41 |
self.__logger = logging.getLogger(__name__)
|
42 | 42 |
|
43 |
+ self._instance_name = None
|
|
44 |
+ |
|
43 | 45 |
self._cache_failed_actions = cache_failed_actions
|
44 | 46 |
|
45 | 47 |
# --- Public API ---
|
46 | 48 |
|
49 |
+ @property
|
|
50 |
+ def instance_name(self):
|
|
51 |
+ return self._instance_name
|
|
52 |
+ |
|
47 | 53 |
def register_instance_with_server(self, instance_name, server):
|
48 |
- server.add_action_cache_instance(self, instance_name)
|
|
54 |
+ """Names and registers the action-cache instance with a given server."""
|
|
55 |
+ if self._instance_name is None:
|
|
56 |
+ server.add_action_cache_instance(self, instance_name)
|
|
57 |
+ |
|
58 |
+ self._instance_name = instance_name
|
|
59 |
+ |
|
60 |
+ else:
|
|
61 |
+ raise AssertionError("Instance already registered")
|
|
49 | 62 |
|
50 | 63 |
def get_action_result(self, action_digest):
|
51 | 64 |
"""Retrieves the cached result for an action."""
|
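Note: the same register-once guard recurs for every instance type in this series; wiring one instance under two names is treated as a configuration error. A sketch of the intended behaviour (`storage` and `server` stand in for configured objects):

    from buildgrid.server.actioncache.storage import ActionCache

    cache = ActionCache(storage, 50)
    cache.register_instance_with_server('main', server)
    assert cache.instance_name == 'main'

    try:
        cache.register_instance_with_server('other', server)  # second call
    except AssertionError:
        pass  # "Instance already registered"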
... | ... | @@ -33,16 +33,31 @@ class BotsInterface: |
33 | 33 |
def __init__(self, scheduler):
|
34 | 34 |
self.__logger = logging.getLogger(__name__)
|
35 | 35 |
|
36 |
+ self._scheduler = scheduler
|
|
37 |
+ self._instance_name = None
|
|
38 |
+ |
|
36 | 39 |
self._bot_ids = {}
|
37 | 40 |
self._assigned_leases = {}
|
38 |
- self._scheduler = scheduler
|
|
41 |
+ |
|
42 |
+ # --- Public API ---
|
|
43 |
+ |
|
44 |
+ @property
|
|
45 |
+ def instance_name(self):
|
|
46 |
+ return self._instance_name
|
|
39 | 47 |
|
40 | 48 |
@property
|
41 | 49 |
def scheduler(self):
|
42 | 50 |
return self._scheduler
|
43 | 51 |
|
44 | 52 |
def register_instance_with_server(self, instance_name, server):
|
45 |
- server.add_bots_interface(self, instance_name)
|
|
53 |
+ """Names and registers the bots interface with a given server."""
|
|
54 |
+ if self._instance_name is None:
|
|
55 |
+ server.add_bots_interface(self, instance_name)
|
|
56 |
+ |
|
57 |
+ self._instance_name = instance_name
|
|
58 |
+ |
|
59 |
+ else:
|
|
60 |
+ raise AssertionError("Instance already registered")
|
|
46 | 61 |
|
47 | 62 |
def create_bot_session(self, parent, bot_session):
|
48 | 63 |
""" Creates a new bot session. Server should assign a unique
|
... | ... | @@ -98,6 +113,8 @@ class BotsInterface: |
98 | 113 |
self._request_leases(bot_session)
|
99 | 114 |
return bot_session
|
100 | 115 |
|
116 |
+ # --- Private API ---
|
|
117 |
+ |
|
101 | 118 |
def _request_leases(self, bot_session):
|
102 | 119 |
# Only send one lease at a time currently.
|
103 | 120 |
if not bot_session.leases:
|
... | ... | @@ -16,18 +16,39 @@ |
16 | 16 |
import logging
|
17 | 17 |
|
18 | 18 |
from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
|
19 |
+from buildgrid._protos.build.bazel.semver import semver_pb2
|
|
20 |
+from buildgrid.settings import HIGH_REAPI_VERSION, LOW_REAPI_VERSION
|
|
19 | 21 |
|
20 | 22 |
|
21 | 23 |
class CapabilitiesInstance:
|
22 | 24 |
|
23 | 25 |
def __init__(self, cas_instance=None, action_cache_instance=None, execution_instance=None):
|
24 | 26 |
self.__logger = logging.getLogger(__name__)
|
27 |
+ |
|
28 |
+ self._instance_name = None
|
|
29 |
+ |
|
25 | 30 |
self.__cas_instance = cas_instance
|
26 | 31 |
self.__action_cache_instance = action_cache_instance
|
27 | 32 |
self.__execution_instance = execution_instance
|
28 | 33 |
|
34 |
+ self.__high_api_version = None
|
|
35 |
+ self.__low_api_version = None
|
|
36 |
+ |
|
37 |
+ # --- Public API ---
|
|
38 |
+ |
|
39 |
+ @property
|
|
40 |
+ def instance_name(self):
|
|
41 |
+ return self._instance_name
|
|
42 |
+ |
|
29 | 43 |
def register_instance_with_server(self, instance_name, server):
|
30 |
- server.add_capabilities_instance(self, instance_name)
|
|
44 |
+ """Names and registers the capabilities instance with a given server."""
|
|
45 |
+ if self._instance_name is None:
|
|
46 |
+ server.add_capabilities_instance(self, instance_name)
|
|
47 |
+ |
|
48 |
+ self._instance_name = instance_name
|
|
49 |
+ |
|
50 |
+ else:
|
|
51 |
+ raise AssertionError("Instance already registered")
|
|
31 | 52 |
|
32 | 53 |
def add_cas_instance(self, cas_instance):
|
33 | 54 |
self.__cas_instance = cas_instance
|
... | ... | @@ -39,17 +60,24 @@ class CapabilitiesInstance: |
39 | 60 |
self.__execution_instance = execution_instance
|
40 | 61 |
|
41 | 62 |
def get_capabilities(self):
|
63 |
+ cache_capabilities = self._get_cache_capabilities()
|
|
64 |
+ execution_capabilities = self._get_capabilities_execution()
|
|
65 |
+ |
|
66 |
+ if self.__high_api_version is None:
|
|
67 |
+ self.__high_api_version = self._split_semantic_version(HIGH_REAPI_VERSION)
|
|
68 |
+ if self.__low_api_version is None:
|
|
69 |
+ self.__low_api_version = self._split_semantic_version(LOW_REAPI_VERSION)
|
|
70 |
+ |
|
42 | 71 |
server_capabilities = remote_execution_pb2.ServerCapabilities()
|
43 |
- server_capabilities.cache_capabilities.CopyFrom(self._get_cache_capabilities())
|
|
44 |
- server_capabilities.execution_capabilities.CopyFrom(self._get_capabilities_execution())
|
|
45 |
- # TODO
|
|
46 |
- # When API is stable, fill out SemVer values
|
|
47 |
- # server_capabilities.deprecated_api_version =
|
|
48 |
- # server_capabilities.low_api_version =
|
|
49 |
- # server_capabilities.low_api_version =
|
|
50 |
- # server_capabilities.hig_api_version =
|
|
72 |
+ server_capabilities.cache_capabilities.CopyFrom(cache_capabilities)
|
|
73 |
+ server_capabilities.execution_capabilities.CopyFrom(execution_capabilities)
|
|
74 |
+ server_capabilities.low_api_version.CopyFrom(self.__low_api_version)
|
|
75 |
+ server_capabilities.high_api_version.CopyFrom(self.__high_api_version)
|
|
76 |
+ |
|
51 | 77 |
return server_capabilities
|
52 | 78 |
|
79 |
+ # --- Private API ---
|
|
80 |
+ |
|
53 | 81 |
def _get_cache_capabilities(self):
|
54 | 82 |
capabilities = remote_execution_pb2.CacheCapabilities()
|
55 | 83 |
action_cache_update_capabilities = remote_execution_pb2.ActionCacheUpdateCapabilities()
|
... | ... | @@ -79,3 +107,13 @@ class CapabilitiesInstance: |
79 | 107 |
capabilities.exec_enabled = False
|
80 | 108 |
|
81 | 109 |
return capabilities
|
110 |
+ |
|
111 |
+ def _split_semantic_version(self, version_string):
|
|
112 |
+ major_version, minor_version, patch_version = version_string.split('.')
|
|
113 |
+ |
|
114 |
+ semantic_version = semver_pb2.SemVer()
|
|
115 |
+ semantic_version.major = int(major_version)
|
|
116 |
+ semantic_version.minor = int(minor_version)
|
|
117 |
+ semantic_version.patch = int(patch_version)
|
|
118 |
+ |
|
119 |
+ return semantic_version
|
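Note: with the TODO resolved, GetCapabilities responses now carry concrete version bounds. A sketch of what a caller can expect, assuming a bare CapabilitiesInstance (no CAS, action-cache or execution backends attached) still answers with defaults:

    from buildgrid.server.capabilities.instance import CapabilitiesInstance

    instance = CapabilitiesInstance()
    capabilities = instance.get_capabilities()

    # HIGH_REAPI_VERSION and LOW_REAPI_VERSION are both '2.0.0':
    assert capabilities.high_api_version.major == 2
    assert capabilities.low_api_version.minor == 0
    assert capabilities.low_api_version.patch == 0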
... | ... | @@ -49,8 +49,17 @@ class CapabilitiesService(remote_execution_pb2_grpc.CapabilitiesServicer): |
49 | 49 |
|
50 | 50 |
@authorize(AuthContext)
|
51 | 51 |
def GetCapabilities(self, request, context):
|
52 |
+ """Handles GetCapabilitiesRequest messages.
|
|
53 |
+ |
|
54 |
+ Args:
|
|
55 |
+ request (GetCapabilitiesRequest): The incoming RPC request.
|
|
56 |
+ context (grpc.ServicerContext): Context for the RPC call.
|
|
57 |
+ """
|
|
58 |
+ self.__logger.debug("GetCapabilities request from [%s]", context.peer())
|
|
59 |
+ |
|
52 | 60 |
try:
|
53 | 61 |
instance = self._get_instance(request.instance_name)
|
62 |
+ |
|
54 | 63 |
return instance.get_capabilities()
|
55 | 64 |
|
56 | 65 |
except InvalidArgumentError as e:
|
... | ... | @@ -34,10 +34,25 @@ class ContentAddressableStorageInstance: |
34 | 34 |
def __init__(self, storage):
|
35 | 35 |
self.__logger = logging.getLogger(__name__)
|
36 | 36 |
|
37 |
- self._storage = storage
|
|
37 |
+ self._instance_name = None
|
|
38 |
+ |
|
39 |
+ self.__storage = storage
|
|
40 |
+ |
|
41 |
+ # --- Public API ---
|
|
42 |
+ |
|
43 |
+ @property
|
|
44 |
+ def instance_name(self):
|
|
45 |
+ return self._instance_name
|
|
38 | 46 |
|
39 | 47 |
def register_instance_with_server(self, instance_name, server):
|
40 |
- server.add_cas_instance(self, instance_name)
|
|
48 |
+ """Names and registers the CAS instance with a given server."""
|
|
49 |
+ if self._instance_name is None:
|
|
50 |
+ server.add_cas_instance(self, instance_name)
|
|
51 |
+ |
|
52 |
+ self._instance_name = instance_name
|
|
53 |
+ |
|
54 |
+ else:
|
|
55 |
+ raise AssertionError("Instance already registered")
|
|
41 | 56 |
|
42 | 57 |
def hash_type(self):
|
43 | 58 |
return get_hash_type()
|
... | ... | @@ -51,12 +66,12 @@ class ContentAddressableStorageInstance: |
51 | 66 |
return re_pb2.CacheCapabilities().DISALLOWED
|
52 | 67 |
|
53 | 68 |
def find_missing_blobs(self, blob_digests):
|
54 |
- storage = self._storage
|
|
69 |
+ storage = self.__storage
|
|
55 | 70 |
return re_pb2.FindMissingBlobsResponse(
|
56 | 71 |
missing_blob_digests=storage.missing_blobs(blob_digests))
|
57 | 72 |
|
58 | 73 |
def batch_update_blobs(self, requests):
|
59 |
- storage = self._storage
|
|
74 |
+ storage = self.__storage
|
|
60 | 75 |
store = []
|
61 | 76 |
for request_proto in requests:
|
62 | 77 |
store.append((request_proto.digest, request_proto.data))
|
... | ... | @@ -72,7 +87,7 @@ class ContentAddressableStorageInstance: |
72 | 87 |
return response
|
73 | 88 |
|
74 | 89 |
def batch_read_blobs(self, digests):
|
75 |
- storage = self._storage
|
|
90 |
+ storage = self.__storage
|
|
76 | 91 |
|
77 | 92 |
response = re_pb2.BatchReadBlobsResponse()
|
78 | 93 |
|
... | ... | @@ -101,7 +116,7 @@ class ContentAddressableStorageInstance: |
101 | 116 |
return response
|
102 | 117 |
|
103 | 118 |
def get_tree(self, request):
|
104 |
- storage = self._storage
|
|
119 |
+ storage = self.__storage
|
|
105 | 120 |
|
106 | 121 |
response = re_pb2.GetTreeResponse()
|
107 | 122 |
page_size = request.page_size
|
... | ... | @@ -143,10 +158,25 @@ class ByteStreamInstance: |
143 | 158 |
def __init__(self, storage):
|
144 | 159 |
self.__logger = logging.getLogger(__name__)
|
145 | 160 |
|
146 |
- self._storage = storage
|
|
161 |
+ self._instance_name = None
|
|
162 |
+ |
|
163 |
+ self.__storage = storage
|
|
164 |
+ |
|
165 |
+ # --- Public API ---
|
|
166 |
+ |
|
167 |
+ @property
|
|
168 |
+ def instance_name(self):
|
|
169 |
+ return self._instance_name
|
|
147 | 170 |
|
148 | 171 |
def register_instance_with_server(self, instance_name, server):
|
149 |
- server.add_bytestream_instance(self, instance_name)
|
|
172 |
+ """Names and registers the byte-stream instance with a given server."""
|
|
173 |
+ if self._instance_name is None:
|
|
174 |
+ server.add_bytestream_instance(self, instance_name)
|
|
175 |
+ |
|
176 |
+ self._instance_name = instance_name
|
|
177 |
+ |
|
178 |
+ else:
|
|
179 |
+ raise AssertionError("Instance already registered")
|
|
150 | 180 |
|
151 | 181 |
def read(self, digest_hash, digest_size, read_offset, read_limit):
|
152 | 182 |
if len(digest_hash) != HASH_LENGTH or not digest_size.isdigit():
|
... | ... | @@ -169,7 +199,7 @@ class ByteStreamInstance: |
169 | 199 |
raise InvalidArgumentError("Negative read_limit is invalid")
|
170 | 200 |
|
171 | 201 |
# Read the blob from storage and send its contents to the client.
|
172 |
- result = self._storage.get_blob(digest)
|
|
202 |
+ result = self.__storage.get_blob(digest)
|
|
173 | 203 |
if result is None:
|
174 | 204 |
raise NotFoundError("Blob not found")
|
175 | 205 |
|
... | ... | @@ -191,7 +221,7 @@ class ByteStreamInstance: |
191 | 221 |
|
192 | 222 |
digest = re_pb2.Digest(hash=digest_hash, size_bytes=int(digest_size))
|
193 | 223 |
|
194 |
- write_session = self._storage.begin_write(digest)
|
|
224 |
+ write_session = self.__storage.begin_write(digest)
|
|
195 | 225 |
|
196 | 226 |
# Start the write session and write the first request's data.
|
197 | 227 |
write_session.write(first_block)
|
... | ... | @@ -213,6 +243,6 @@ class ByteStreamInstance: |
213 | 243 |
elif computed_hash.hexdigest() != digest.hash:
|
214 | 244 |
raise InvalidArgumentError("Data does not match hash")
|
215 | 245 |
|
216 |
- self._storage.commit_write(digest, write_session)
|
|
246 |
+ self.__storage.commit_write(digest, write_session)
|
|
217 | 247 |
|
218 | 248 |
return bytestream_pb2.WriteResponse(committed_size=bytes_written)
|
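Note on the `_storage` to `__storage` renames throughout this file: double leading underscores trigger Python's name mangling, which hides the attribute from subclasses and external callers. A minimal illustration (the class name is arbitrary):

    class Instance:
        def __init__(self, storage):
            self.__storage = storage  # stored as _Instance__storage

    instance = Instance(object())
    assert hasattr(instance, '_Instance__storage')  # mangled name exists
    assert not hasattr(instance, '__storage')       # plain name does not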
... | ... | @@ -36,19 +36,19 @@ from .operations.instance import OperationsInstance |
36 | 36 |
|
37 | 37 |
class ExecutionController:
|
38 | 38 |
|
39 |
- def __init__(self, action_cache=None, storage=None):
|
|
39 |
+ def __init__(self, storage=None, action_cache=None, action_browser_url=None):
|
|
40 | 40 |
self.__logger = logging.getLogger(__name__)
|
41 | 41 |
|
42 |
- scheduler = Scheduler(action_cache)
|
|
42 |
+ scheduler = Scheduler(action_cache, action_browser_url=action_browser_url)
|
|
43 | 43 |
|
44 | 44 |
self._execution_instance = ExecutionInstance(scheduler, storage)
|
45 | 45 |
self._bots_interface = BotsInterface(scheduler)
|
46 | 46 |
self._operations_instance = OperationsInstance(scheduler)
|
47 | 47 |
|
48 | 48 |
def register_instance_with_server(self, instance_name, server):
|
49 |
- server.add_execution_instance(self._execution_instance, instance_name)
|
|
50 |
- server.add_bots_interface(self._bots_interface, instance_name)
|
|
51 |
- server.add_operations_instance(self._operations_instance, instance_name)
|
|
49 |
+ self._execution_instance.register_instance_with_server(instance_name, server)
|
|
50 |
+ self._bots_interface.register_instance_with_server(instance_name, server)
|
|
51 |
+ self._operations_instance.register_instance_with_server(instance_name, server)
|
|
52 | 52 |
|
53 | 53 |
def stream_operation_updates(self, message_queue, operation_name):
|
54 | 54 |
operation = message_queue.get()
|
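Note: with the reordered keyword arguments, call sites read naturally and the new parameter threads straight through to the scheduler. A sketch (the URL matches the one exercised in the tests below; `storage`, `cache` and `server` are assumed to be configured):

    from buildgrid.server.controller import ExecutionController

    controller = ExecutionController(storage=storage,
                                     action_cache=cache,
                                     action_browser_url='http://localhost:8080')
    controller.register_instance_with_server('main', server)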
... | ... | @@ -31,15 +31,32 @@ class ExecutionInstance: |
31 | 31 |
def __init__(self, scheduler, storage):
|
32 | 32 |
self.__logger = logging.getLogger(__name__)
|
33 | 33 |
|
34 |
- self._storage = storage
|
|
35 | 34 |
self._scheduler = scheduler
|
35 |
+ self._instance_name = None
|
|
36 |
+ |
|
37 |
+ self.__storage = storage
|
|
38 |
+ |
|
39 |
+ # --- Public API ---
|
|
40 |
+ |
|
41 |
+ @property
|
|
42 |
+ def instance_name(self):
|
|
43 |
+ return self._instance_name
|
|
36 | 44 |
|
37 | 45 |
@property
|
38 | 46 |
def scheduler(self):
|
39 | 47 |
return self._scheduler
|
40 | 48 |
|
41 | 49 |
def register_instance_with_server(self, instance_name, server):
|
42 |
- server.add_execution_instance(self, instance_name)
|
|
50 |
+ """Names and registers the execution instance with a given server."""
|
|
51 |
+ if self._instance_name is None:
|
|
52 |
+ server.add_execution_instance(self, instance_name)
|
|
53 |
+ |
|
54 |
+ self._instance_name = instance_name
|
|
55 |
+ if self._scheduler is not None:
|
|
56 |
+ self._scheduler.set_instance_name(instance_name)
|
|
57 |
+ |
|
58 |
+ else:
|
|
59 |
+ raise AssertionError("Instance already registered")
|
|
43 | 60 |
|
44 | 61 |
def hash_type(self):
|
45 | 62 |
return get_hash_type()
|
... | ... | @@ -49,11 +66,12 @@ class ExecutionInstance: |
49 | 66 |
Queues an action and creates an Operation instance to be associated with
|
50 | 67 |
this action.
|
51 | 68 |
"""
|
52 |
- action = self._storage.get_message(action_digest, Action)
|
|
69 |
+ action = self.__storage.get_message(action_digest, Action)
|
|
70 |
+ |
|
53 | 71 |
if not action:
|
54 | 72 |
raise FailedPreconditionError("Could not get action from storage.")
|
55 | 73 |
|
56 |
- command = self._storage.get_message(action.command_digest, Command)
|
|
74 |
+ command = self.__storage.get_message(action.command_digest, Command)
|
|
57 | 75 |
|
58 | 76 |
if not command:
|
59 | 77 |
raise FailedPreconditionError("Could not get command from storage.")
|
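Note: registering the execution instance now also names its scheduler, which the browser-URL generation below depends on. In outline (a sketch; `storage` and `server` are assumed):

    from buildgrid.server.execution.instance import ExecutionInstance
    from buildgrid.server.scheduler import Scheduler

    scheduler = Scheduler(action_cache=None,
                          action_browser_url='http://localhost:8080')
    execution = ExecutionInstance(scheduler, storage)

    execution.register_instance_with_server('main', server)
    assert scheduler.instance_name == 'main'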
... | ... | @@ -37,10 +37,8 @@ class Job: |
37 | 37 |
self._action = remote_execution_pb2.Action()
|
38 | 38 |
self._lease = None
|
39 | 39 |
|
40 |
- self.__execute_response = None
|
|
40 |
+ self.__execute_response = remote_execution_pb2.ExecuteResponse()
|
|
41 | 41 |
self.__operation_metadata = remote_execution_pb2.ExecuteOperationMetadata()
|
42 |
- self.__operations_by_name = {} # Name to Operation 1:1 mapping
|
|
43 |
- self.__operations_by_peer = {} # Peer to Operation 1:1 mapping
|
|
44 | 42 |
|
45 | 43 |
self.__queued_timestamp = timestamp_pb2.Timestamp()
|
46 | 44 |
self.__queued_time_duration = duration_pb2.Duration()
|
... | ... | @@ -48,6 +46,8 @@ class Job: |
48 | 46 |
self.__worker_completed_timestamp = timestamp_pb2.Timestamp()
|
49 | 47 |
|
50 | 48 |
self.__operations_message_queues = {}
|
49 |
+ self.__operations_by_name = {} # Name to Operation 1:1 mapping
|
|
50 |
+ self.__operations_by_peer = {} # Peer to Operation 1:1 mapping
|
|
51 | 51 |
self.__operations_cancelled = set()
|
52 | 52 |
self.__lease_cancelled = False
|
53 | 53 |
self.__job_cancelled = False
|
... | ... | @@ -146,6 +146,11 @@ class Job: |
146 | 146 |
else:
|
147 | 147 |
return False
|
148 | 148 |
|
149 |
+ def set_action_url(self, url):
|
|
150 |
+ """Generates a CAS browser URL for the job's action."""
|
|
151 |
+ if url.for_message('action', self.__operation_metadata.action_digest):
|
|
152 |
+ self.__execute_response.message = url.generate()
|
|
153 |
+ |
|
149 | 154 |
def set_cached_result(self, action_result):
|
150 | 155 |
"""Allows specifying an action result form the action cache for the job.
|
151 | 156 |
|
... | ... | @@ -155,7 +160,6 @@ class Job: |
155 | 160 |
Args:
|
156 | 161 |
action_result (ActionResult): The result from cache.
|
157 | 162 |
"""
|
158 |
- self.__execute_response = remote_execution_pb2.ExecuteResponse()
|
|
159 | 163 |
self.__execute_response.result.CopyFrom(action_result)
|
160 | 164 |
self.__execute_response.cached_result = True
|
161 | 165 |
|
... | ... | @@ -445,7 +449,6 @@ class Job: |
445 | 449 |
action_metadata.worker_start_timestamp.CopyFrom(self.__worker_start_timestamp)
|
446 | 450 |
action_metadata.worker_completed_timestamp.CopyFrom(self.__worker_completed_timestamp)
|
447 | 451 |
|
448 |
- self.__execute_response = remote_execution_pb2.ExecuteResponse()
|
|
449 | 452 |
self.__execute_response.result.CopyFrom(action_result)
|
450 | 453 |
self.__execute_response.cached_result = False
|
451 | 454 |
self.__execute_response.status.CopyFrom(status)
|
... | ... | @@ -31,13 +31,27 @@ class OperationsInstance: |
31 | 31 |
self.__logger = logging.getLogger(__name__)
|
32 | 32 |
|
33 | 33 |
self._scheduler = scheduler
|
34 |
+ self._instance_name = None
|
|
35 |
+ |
|
36 |
+ # --- Public API ---
|
|
37 |
+ |
|
38 |
+ @property
|
|
39 |
+ def instance_name(self):
|
|
40 |
+ return self._instance_name
|
|
34 | 41 |
|
35 | 42 |
@property
|
36 | 43 |
def scheduler(self):
|
37 | 44 |
return self._scheduler
|
38 | 45 |
|
39 | 46 |
def register_instance_with_server(self, instance_name, server):
|
40 |
- server.add_operations_instance(self, instance_name)
|
|
47 |
+ """Names and registers the operations instance with a given server."""
|
|
48 |
+ if self._instance_name is None:
|
|
49 |
+ server.add_operations_instance(self, instance_name)
|
|
50 |
+ |
|
51 |
+ self._instance_name = instance_name
|
|
52 |
+ |
|
53 |
+ else:
|
|
54 |
+ raise AssertionError("Instance already registered")
|
|
41 | 55 |
|
42 | 56 |
def get_operation(self, job_name):
|
43 | 57 |
try:
|
... | ... | @@ -41,13 +41,29 @@ class ReferenceCache: |
41 | 41 |
"""
|
42 | 42 |
self.__logger = logging.getLogger(__name__)
|
43 | 43 |
|
44 |
+ self._instance_name = None
|
|
45 |
+ |
|
46 |
+ self.__storage = storage
|
|
47 |
+ |
|
44 | 48 |
self._allow_updates = allow_updates
|
45 |
- self._storage = storage
|
|
46 | 49 |
self._max_cached_refs = max_cached_refs
|
47 | 50 |
self._digest_map = collections.OrderedDict()
|
48 | 51 |
|
52 |
+ # --- Public API ---
|
|
53 |
+ |
|
54 |
+ @property
|
|
55 |
+ def instance_name(self):
|
|
56 |
+ return self._instance_name
|
|
57 |
+ |
|
49 | 58 |
def register_instance_with_server(self, instance_name, server):
|
50 |
- server.add_reference_storage_instance(self, instance_name)
|
|
59 |
+ """Names and registers the refs instance with a given server."""
|
|
60 |
+ if self._instance_name is None:
|
|
61 |
+ server.add_reference_storage_instance(self, instance_name)
|
|
62 |
+ |
|
63 |
+ self._instance_name = instance_name
|
|
64 |
+ |
|
65 |
+ else:
|
|
66 |
+ raise AssertionError("Instance already registered")
|
|
51 | 67 |
|
52 | 68 |
@property
|
53 | 69 |
def allow_updates(self):
|
... | ... | @@ -64,7 +80,8 @@ class ReferenceCache: |
64 | 80 |
NotFoundError.
|
65 | 81 |
"""
|
66 | 82 |
if key in self._digest_map:
|
67 |
- reference_result = self._storage.get_message(self._digest_map[key], remote_execution_pb2.Digest)
|
|
83 |
+ reference_result = self.__storage.get_message(self._digest_map[key],
|
|
84 |
+ remote_execution_pb2.Digest)
|
|
68 | 85 |
|
69 | 86 |
if reference_result is not None:
|
70 | 87 |
return reference_result
|
... | ... | @@ -84,7 +101,8 @@ class ReferenceCache: |
84 | 101 |
NotFoundError.
|
85 | 102 |
"""
|
86 | 103 |
if key in self._digest_map:
|
87 |
- reference_result = self._storage.get_message(self._digest_map[key], remote_execution_pb2.ActionResult)
|
|
104 |
+ reference_result = self.__storage.get_message(self._digest_map[key],
|
|
105 |
+ remote_execution_pb2.ActionResult)
|
|
88 | 106 |
|
89 | 107 |
if reference_result is not None:
|
90 | 108 |
if self._action_result_blobs_still_exist(reference_result):
|
... | ... | @@ -115,9 +133,11 @@ class ReferenceCache: |
115 | 133 |
while len(self._digest_map) >= self._max_cached_refs:
|
116 | 134 |
self._digest_map.popitem(last=False)
|
117 | 135 |
|
118 |
- result_digest = self._storage.put_message(result)
|
|
136 |
+ result_digest = self.__storage.put_message(result)
|
|
119 | 137 |
self._digest_map[key] = result_digest
|
120 | 138 |
|
139 |
+ # --- Private API ---
|
|
140 |
+ |
|
121 | 141 |
def _action_result_blobs_still_exist(self, action_result):
|
122 | 142 |
"""Checks CAS for ActionResult output blobs existance.
|
123 | 143 |
|
... | ... | @@ -135,8 +155,8 @@ class ReferenceCache: |
135 | 155 |
|
136 | 156 |
for output_directory in action_result.output_directories:
|
137 | 157 |
blobs_needed.append(output_directory.tree_digest)
|
138 |
- tree = self._storage.get_message(output_directory.tree_digest,
|
|
139 |
- remote_execution_pb2.Tree)
|
|
158 |
+ tree = self.__storage.get_message(output_directory.tree_digest,
|
|
159 |
+ remote_execution_pb2.Tree)
|
|
140 | 160 |
if tree is None:
|
141 | 161 |
return False
|
142 | 162 |
|
... | ... | @@ -153,5 +173,5 @@ class ReferenceCache: |
153 | 173 |
if action_result.stderr_digest.hash and not action_result.stderr_raw:
|
154 | 174 |
blobs_needed.append(action_result.stderr_digest)
|
155 | 175 |
|
156 |
- missing = self._storage.missing_blobs(blobs_needed)
|
|
176 |
+ missing = self.__storage.missing_blobs(blobs_needed)
|
|
157 | 177 |
return len(missing) == 0
|
... | ... | @@ -22,19 +22,23 @@ Schedules jobs. |
22 | 22 |
import bisect
|
23 | 23 |
from datetime import timedelta
|
24 | 24 |
import logging
|
25 |
+from threading import Lock
|
|
25 | 26 |
|
26 | 27 |
from buildgrid._enums import LeaseState, OperationStage
|
27 | 28 |
from buildgrid._exceptions import NotFoundError
|
28 | 29 |
from buildgrid.server.job import Job
|
30 |
+from buildgrid.utils import BrowserURL
|
|
29 | 31 |
|
30 | 32 |
|
31 | 33 |
class Scheduler:
|
32 | 34 |
|
33 | 35 |
MAX_N_TRIES = 5
|
34 | 36 |
|
35 |
- def __init__(self, action_cache=None, monitor=False):
|
|
37 |
+ def __init__(self, action_cache=None, action_browser_url=False, monitor=False):
|
|
36 | 38 |
self.__logger = logging.getLogger(__name__)
|
37 | 39 |
|
40 |
+ self._instance_name = None
|
|
41 |
+ |
|
38 | 42 |
self.__build_metadata_queues = None
|
39 | 43 |
|
40 | 44 |
self.__operations_by_stage = None
|
... | ... | @@ -43,20 +47,29 @@ class Scheduler: |
43 | 47 |
self.__retries_count = 0
|
44 | 48 |
|
45 | 49 |
self._action_cache = action_cache
|
50 |
+ self._action_browser_url = action_browser_url
|
|
46 | 51 |
|
47 | 52 |
self.__jobs_by_action = {} # Action to Job 1:1 mapping
|
48 | 53 |
self.__jobs_by_operation = {} # Operation to Job 1:1 mapping
|
49 | 54 |
self.__jobs_by_name = {} # Name to Job 1:1 mapping
|
50 | 55 |
|
51 | 56 |
self.__queue = []
|
57 |
+ self.__queue_lock = Lock()
|
|
52 | 58 |
|
53 |
- self._is_instrumented = monitor
|
|
54 |
- |
|
55 |
- if self._is_instrumented:
|
|
59 |
+ self._is_instrumented = False
|
|
60 |
+ if monitor:
|
|
56 | 61 |
self.activate_monitoring()
|
57 | 62 |
|
58 | 63 |
# --- Public API ---
|
59 | 64 |
|
65 |
+ @property
|
|
66 |
+ def instance_name(self):
|
|
67 |
+ return self._instance_name
|
|
68 |
+ |
|
69 |
+ def set_instance_name(self, instance_name):
|
|
70 |
+ if not self._instance_name:
|
|
71 |
+ self._instance_name = instance_name
|
|
72 |
+ |
|
60 | 73 |
def list_current_jobs(self):
|
61 | 74 |
"""Returns a list of the :class:`Job` names currently managed."""
|
62 | 75 |
return self.__jobs_by_name.keys()
|
... | ... | @@ -186,6 +199,10 @@ class Scheduler: |
186 | 199 |
platform_requirements=platform_requirements,
|
187 | 200 |
priority=priority)
|
188 | 201 |
|
202 |
+ if self._action_browser_url:
|
|
203 |
+ job.set_action_url(
|
|
204 |
+ BrowserURL(self._action_browser_url, self._instance_name))
|
|
205 |
+ |
|
189 | 206 |
self.__logger.debug("Job created for action [%s]: [%s]",
|
190 | 207 |
action_digest.hash[:8], job.name)
|
191 | 208 |
|
... | ... | @@ -281,27 +298,26 @@ class Scheduler: |
281 | 298 |
worker properties, configuration and state at the time of the
|
282 | 299 |
request.
|
283 | 300 |
"""
|
284 |
- if not self.__queue:
|
|
285 |
- return []
|
|
286 |
- |
|
287 |
- # Looking for the first job that could be assigned to the worker...
|
|
288 |
- for job_index, job in enumerate(self.__queue):
|
|
289 |
- if self._worker_is_capable(worker_capabilities, job):
|
|
290 |
- self.__logger.info("Job scheduled to run: [%s]", job.name)
|
|
301 |
+ # TODO: Replace with a more efficient way of doing this.
|
|
302 |
+ with self.__queue_lock:
|
|
303 |
+ # Looking for the first job that could be assigned to the worker...
|
|
304 |
+ for job_index, job in enumerate(self.__queue):
|
|
305 |
+ if self._worker_is_capable(worker_capabilities, job):
|
|
306 |
+ self.__logger.info("Job scheduled to run: [%s]", job.name)
|
|
291 | 307 |
|
292 |
- lease = job.lease
|
|
308 |
+ lease = job.lease
|
|
293 | 309 |
|
294 |
- if not lease:
|
|
295 |
- # For now, one lease at a time:
|
|
296 |
- lease = job.create_lease()
|
|
310 |
+ if not lease:
|
|
311 |
+ # For now, one lease at a time:
|
|
312 |
+ lease = job.create_lease()
|
|
297 | 313 |
|
298 |
- if lease:
|
|
299 |
- del self.__queue[job_index]
|
|
300 |
- return [lease]
|
|
314 |
+ if lease:
|
|
315 |
+ del self.__queue[job_index]
|
|
316 |
+ return [lease]
|
|
301 | 317 |
|
302 |
- return None
|
|
318 |
+ return []
|
|
303 | 319 |
|
304 |
- return None
|
|
320 |
+ return []
|
|
305 | 321 |
|
306 | 322 |
def update_job_lease_state(self, job_name, lease):
|
307 | 323 |
"""Requests a state transition for a job's current :class:Lease.
|
... | ... | @@ -535,11 +551,12 @@ class Scheduler: |
535 | 551 |
"""Schedules or reschedules a job."""
|
536 | 552 |
job = self.__jobs_by_name[job_name]
|
537 | 553 |
|
538 |
- if job.operation_stage == OperationStage.QUEUED:
|
|
539 |
- self.__queue.sort()
|
|
554 |
+ with self.__queue_lock:
|
|
555 |
+ if job.operation_stage == OperationStage.QUEUED:
|
|
556 |
+ self.__queue.sort()
|
|
540 | 557 |
|
541 |
- else:
|
|
542 |
- bisect.insort(self.__queue, job)
|
|
558 |
+ else:
|
|
559 |
+ bisect.insort(self.__queue, job)
|
|
543 | 560 |
|
544 | 561 |
self.__logger.info("Job queued: [%s]", job.name)
|
545 | 562 |
|
... | ... | @@ -548,7 +565,8 @@ class Scheduler: |
548 | 565 |
job = self.__jobs_by_name[job_name]
|
549 | 566 |
|
550 | 567 |
if job.operation_stage == OperationStage.QUEUED:
|
551 |
- self.__queue.remove(job)
|
|
568 |
+ with self.__queue_lock:
|
|
569 |
+ self.__queue.remove(job)
|
|
552 | 570 |
|
553 | 571 |
del self.__jobs_by_action[job.action_digest.hash]
|
554 | 572 |
del self.__jobs_by_name[job.name]
|
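Note: every scan-then-mutate sequence on the shared queue now runs under the new lock, so two workers polling concurrently cannot claim the same job. The pattern in isolation (a generic sketch, not the scheduler itself):

    from threading import Lock

    queue, queue_lock = [], Lock()

    def take_first(predicate):
        # The scan and the delete must be atomic; otherwise a second
        # thread could match the same element before it is removed.
        with queue_lock:
            for index, element in enumerate(queue):
                if predicate(element):
                    del queue[index]
                    return [element]
            return []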
... | ... | @@ -16,10 +16,16 @@ |
16 | 16 |
import hashlib
|
17 | 17 |
|
18 | 18 |
|
19 |
+# Latest REAPI version supported:
|
|
20 |
+HIGH_REAPI_VERSION = '2.0.0'
|
|
21 |
+ |
|
22 |
+# Earliest non-deprecated REAPI version supported:
|
|
23 |
+LOW_REAPI_VERSION = '2.0.0'
|
|
24 |
+ |
|
19 | 25 |
# Hash function used for computing digests:
|
20 | 26 |
HASH = hashlib.sha256
|
21 | 27 |
|
22 |
-# Lenght in bytes of a hash string returned by HASH:
|
|
28 |
+# Length in bytes of a hash string returned by HASH:
|
|
23 | 29 |
HASH_LENGTH = HASH().digest_size * 2
|
24 | 30 |
|
25 | 31 |
# Period, in seconds, for the monitoring cycle:
|
... | ... | @@ -31,7 +37,20 @@ MAX_REQUEST_SIZE = 2 * 1024 * 1024 |
31 | 37 |
# Maximum number of elements per gRPC request:
|
32 | 38 |
MAX_REQUEST_COUNT = 500
|
33 | 39 |
|
40 |
+# Value that establishes an upper bound on the size of a file that can
|
|
41 |
+# be queued into a batch request. Expressed as a percentage of the
|
|
42 |
+# batch size limit:
|
|
43 |
+BATCH_REQUEST_SIZE_THRESHOLD = 0.25
|
|
44 |
+ |
|
34 | 45 |
# String format for log records:
|
35 | 46 |
LOG_RECORD_FORMAT = '%(asctime)s:[%(name)36.36s][%(levelname)5.5s]: %(message)s'
|
36 | 47 |
# The different log record attributes are documented here:
|
37 | 48 |
# https://docs.python.org/3/library/logging.html#logrecord-attributes
|
49 |
+ |
|
50 |
+# URL scheme for the CAS content browser:
|
|
51 |
+BROWSER_URL_FORMAT = '%(type)s/%(instance)s/%(hash)s/%(sizebytes)s/'
|
|
52 |
+# The string markers that are substituted are:
|
|
53 |
+# instance - CAS instance's name.
|
|
54 |
+# type - Type of CAS object, e.g. 'action_result', 'command'...
|
|
55 |
+# hash - Object's digest hash.
|
|
56 |
+# sizebytes - Object's digest size in bytes.
|
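Note: with these defaults the numbers work out as follows (plain arithmetic, shown for illustration):

    from buildgrid.settings import MAX_REQUEST_SIZE, BATCH_REQUEST_SIZE_THRESHOLD

    # Against a server that advertises no limit, the gRPC ceiling applies:
    threshold = BATCH_REQUEST_SIZE_THRESHOLD * MAX_REQUEST_SIZE
    assert MAX_REQUEST_SIZE == 2 * 1024 * 1024  # 2 MiB
    assert threshold == 512 * 1024              # 25% of 2 MiB = 524288 bytes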
... | ... | @@ -13,14 +13,61 @@ |
13 | 13 |
# limitations under the License.
|
14 | 14 |
|
15 | 15 |
|
16 |
+from urllib.parse import urljoin
|
|
16 | 17 |
from operator import attrgetter
|
17 | 18 |
import os
|
18 | 19 |
import socket
|
19 | 20 |
|
20 |
-from buildgrid.settings import HASH, HASH_LENGTH
|
|
21 |
+from buildgrid.settings import HASH, HASH_LENGTH, BROWSER_URL_FORMAT
|
|
21 | 22 |
from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
|
22 | 23 |
|
23 | 24 |
|
25 |
+class BrowserURL:
|
|
26 |
+ |
|
27 |
+ __url_markers = (
|
|
28 |
+ '%(instance)s',
|
|
29 |
+ '%(type)s',
|
|
30 |
+ '%(hash)s',
|
|
31 |
+ '%(sizebytes)s',
|
|
32 |
+ )
|
|
33 |
+ |
|
34 |
+ def __init__(self, base_url, instance_name=None):
|
|
35 |
+ """Begins browser URL helper initialization."""
|
|
36 |
+ self.__base_url = base_url
|
|
37 |
+ self.__initialized = False
|
|
38 |
+ self.__url_spec = {
|
|
39 |
+ '%(instance)s': instance_name or '',
|
|
40 |
+ }
|
|
41 |
+ |
|
42 |
+ def for_message(self, message_type, message_digest):
|
|
43 |
+ """Completes browser URL initialization for a protobuf message."""
|
|
44 |
+ if self.__initialized:
|
|
45 |
+ return False
|
|
46 |
+ |
|
47 |
+ self.__url_spec['%(type)s'] = message_type
|
|
48 |
+ self.__url_spec['%(hash)s'] = message_digest.hash
|
|
49 |
+ self.__url_spec['%(sizebytes)s'] = str(message_digest.size_bytes)
|
|
50 |
+ |
|
51 |
+ self.__initialized = True
|
|
52 |
+ return True
|
|
53 |
+ |
|
54 |
+ def generate(self):
|
|
55 |
+ """Generates a browser URL string."""
|
|
56 |
+ if not self.__base_url or not self.__initialized:
|
|
57 |
+ return None
|
|
58 |
+ |
|
59 |
+ url_tail = BROWSER_URL_FORMAT
|
|
60 |
+ |
|
61 |
+ for url_marker in self.__url_markers:
|
|
62 |
+ if url_marker not in self.__url_spec:
|
|
63 |
+ return None
|
|
64 |
+ if url_marker not in url_tail:
|
|
65 |
+ continue
|
|
66 |
+ url_tail = url_tail.replace(url_marker, self.__url_spec[url_marker])
|
|
67 |
+ |
|
68 |
+ return urljoin(self.__base_url, url_tail)
|
|
69 |
+ |
|
70 |
+ |
|
24 | 71 |
def get_hostname():
|
25 | 72 |
"""Returns the hostname of the machine executing that function.
|
26 | 73 |
|
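Note: putting the pieces together, the helper produces URLs of the shape verified by the new tests below. A sketch using one of the digests from test_utils.py (the 'main' instance name is an assumption):

    from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
    from buildgrid.utils import BrowserURL

    url = BrowserURL('http://localhost:8080', 'main')
    digest = remote_execution_pb2.Digest(
        hash='e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
        size_bytes=0)

    assert url.for_message('action', digest)  # one-shot initialization
    print(url.generate())
    # http://localhost:8080/action/main/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/0/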
... | ... | @@ -66,10 +66,10 @@ def controller(request): |
66 | 66 |
|
67 | 67 |
if request.param == "action-cache":
|
68 | 68 |
cache = ActionCache(storage, 50)
|
69 |
- yield ExecutionController(cache, storage)
|
|
69 |
+ yield ExecutionController(storage=storage, action_cache=cache)
|
|
70 | 70 |
|
71 | 71 |
else:
|
72 |
- yield ExecutionController(None, storage)
|
|
72 |
+ yield ExecutionController(storage=storage)
|
|
73 | 73 |
|
74 | 74 |
|
75 | 75 |
# Instance to test
|
... | ... | @@ -71,7 +71,7 @@ def controller(): |
71 | 71 |
write_session.write(action.SerializeToString())
|
72 | 72 |
storage.commit_write(action_digest, write_session)
|
73 | 73 |
|
74 |
- yield ExecutionController(None, storage)
|
|
74 |
+ yield ExecutionController(storage=storage)
|
|
75 | 75 |
|
76 | 76 |
|
77 | 77 |
# Instance to test
|
1 |
+# Copyright (C) 2018 Bloomberg LP
|
|
2 |
+#
|
|
3 |
+# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4 |
+# you may not use this file except in compliance with the License.
|
|
5 |
+# You may obtain a copy of the License at
|
|
6 |
+#
|
|
7 |
+# <http://www.apache.org/licenses/LICENSE-2.0>
|
|
8 |
+#
|
|
9 |
+# Unless required by applicable law or agreed to in writing, software
|
|
10 |
+# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11 |
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12 |
+# See the License for the specific language governing permissions and
|
|
13 |
+# limitations under the License.
|
|
14 |
+ |
|
15 |
+ |
|
16 |
+from urllib.parse import urlparse
|
|
17 |
+ |
|
18 |
+import pytest
|
|
19 |
+ |
|
20 |
+from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
|
|
21 |
+from buildgrid.utils import BrowserURL
|
|
22 |
+from buildgrid.utils import get_hash_type
|
|
23 |
+from buildgrid.utils import create_digest, parse_digest
|
|
24 |
+ |
|
25 |
+ |
|
26 |
+BLOBS = (b'', b'non-empty-blob',)
|
|
27 |
+BLOB_HASHES = (
|
|
28 |
+ 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
|
|
29 |
+ '89070dfb3175a2c75835d70147b52bd97afd8228819566d84eecd2d20e9b19fc',)
|
|
30 |
+BLOB_SIZES = (0, 14,)
|
|
31 |
+BLOB_DATA = zip(BLOBS, BLOB_HASHES, BLOB_SIZES)
|
|
32 |
+ |
|
33 |
+STRINGS = (
|
|
34 |
+ 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/0',
|
|
35 |
+ '89070dfb3175a2c75835d70147b52bd97afd8228819566d84eecd2d20e9b19fc/14',
|
|
36 |
+ 'e1ca41574914ba00e8ed5c8fc78ec8efdfd48941c7e48ad74dad8ada7f2066d/12', )
|
|
37 |
+BLOB_HASHES = (
|
|
38 |
+ 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
|
|
39 |
+ '89070dfb3175a2c75835d70147b52bd97afd8228819566d84eecd2d20e9b19fc',
|
|
40 |
+ None, )
|
|
41 |
+BLOB_SIZES = (0, 14, None,)
|
|
42 |
+STRING_VALIDITIES = (True, True, False,)
|
|
43 |
+STRING_DATA = zip(STRINGS, BLOB_HASHES, BLOB_SIZES, STRING_VALIDITIES)
|
|
44 |
+ |
|
45 |
+BASE_URL = 'http://localhost:8080'
|
|
46 |
+INSTANCES = (None, '', 'instance',)
|
|
47 |
+URL_HASHES = (
|
|
48 |
+ 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
|
|
49 |
+ '89070dfb3175a2c75835d70147b52bd97afd8228819566d84eecd2d20e9b19fc',)
|
|
50 |
+URL_SIZES = (0, 14,)
|
|
51 |
+URL_DATA = zip(URL_HASHES, URL_SIZES)
|
|
52 |
+ |
|
53 |
+ |
|
54 |
+@pytest.mark.parametrize('blob,digest_hash,digest_size', BLOB_DATA)
|
|
55 |
+def test_create_digest(blob, digest_hash, digest_size):
|
|
56 |
+ # Generate a Digest message from given blob:
|
|
57 |
+ blob_digest = create_digest(blob)
|
|
58 |
+ |
|
59 |
+ assert get_hash_type() == remote_execution_pb2.SHA256
|
|
60 |
+ |
|
61 |
+ assert hasattr(blob_digest, 'DESCRIPTOR')
|
|
62 |
+ assert blob_digest.DESCRIPTOR == remote_execution_pb2.Digest.DESCRIPTOR
|
|
63 |
+ assert blob_digest.hash == digest_hash
|
|
64 |
+ assert blob_digest.size_bytes == digest_size
|
|
65 |
+ |
|
66 |
+ |
|
67 |
+@pytest.mark.parametrize('string,digest_hash,digest_size,validity', STRING_DATA)
|
|
68 |
+def test_parse_digest(string, digest_hash, digest_size, validity):
|
|
69 |
+ # Generate a Digest message from given string:
|
|
70 |
+ string_digest = parse_digest(string)
|
|
71 |
+ |
|
72 |
+ assert get_hash_type() == remote_execution_pb2.SHA256
|
|
73 |
+ |
|
74 |
+ if validity:
|
|
75 |
+ assert hasattr(string_digest, 'DESCRIPTOR')
|
|
76 |
+ assert string_digest.DESCRIPTOR == remote_execution_pb2.Digest.DESCRIPTOR
|
|
77 |
+ assert string_digest.hash == digest_hash
|
|
78 |
+ assert string_digest.size_bytes == digest_size
|
|
79 |
+ |
|
80 |
+ else:
|
|
81 |
+ assert string_digest is None
|
|
82 |
+ |
|
83 |
+ |
|
84 |
+@pytest.mark.parametrize('instance', INSTANCES)
|
|
85 |
+@pytest.mark.parametrize('digest_hash,digest_size', URL_DATA)
|
|
86 |
+def test_browser_url_initialization(instance, digest_hash, digest_size):
|
|
87 |
+ # Initialize and generate a browser compatible URL:
|
|
88 |
+ browser_url = BrowserURL(BASE_URL, instance)
|
|
89 |
+ browser_digest = remote_execution_pb2.Digest(hash=digest_hash,
|
|
90 |
+ size_bytes=digest_size)
|
|
91 |
+ |
|
92 |
+ assert browser_url.generate() is None
|
|
93 |
+ assert browser_url.for_message('type', browser_digest)
|
|
94 |
+ assert not browser_url.for_message(None, None)
|
|
95 |
+ |
|
96 |
+ url = browser_url.generate()
|
|
97 |
+ |
|
98 |
+ assert url is not None
|
|
99 |
+ |
|
100 |
+ parsed_url = urlparse(url)
|
|
101 |
+ |
|
102 |
+ if instance:
|
|
103 |
+ assert parsed_url.path.find(instance)
|
|
104 |
+ assert parsed_url.path.find('type') > 0
|
|
105 |
+ assert parsed_url.path.find(digest_hash) > 0
|
|
106 |
+ assert parsed_url.path.find(str(digest_size)) > 0
|