Martin Blanchard pushed to branch mablanch/668-remote-build-failure at BuildStream / buildstream
Commits:
- b199afe6 by Jürg Billeter at 2018-09-25T09:01:51Z
- 7d199322 by Jürg Billeter at 2018-09-25T09:01:51Z
- e2e24015 by Jürg Billeter at 2018-09-25T09:01:51Z
- 697d10f2 by Jürg Billeter at 2018-09-25T09:01:51Z
- 81c51dbf by Jürg Billeter at 2018-09-25T09:31:55Z
- efd1b202 by Martin Blanchard at 2018-09-25T09:54:33Z
- f8bb166a by Martin Blanchard at 2018-09-25T10:14:03Z
6 changed files:
- buildstream/_artifactcache/cascache.py
- buildstream/_artifactcache/casserver.py
- + buildstream/_protos/google/rpc/code.proto
- + buildstream/_protos/google/rpc/code_pb2.py
- + buildstream/_protos/google/rpc/code_pb2_grpc.py
- buildstream/sandbox/_sandboxremote.py
Changes:
buildstream/_artifactcache/cascache.py
@@ -44,6 +44,11 @@ from .._exceptions import ArtifactError
 from . import ArtifactCache
 
 
+# The default limit for gRPC messages is 4 MiB.
+# Limit payload to 1 MiB to leave sufficient headroom for metadata.
+_MAX_PAYLOAD_BYTES = 1024 * 1024
+
+
 # A CASCache manages artifacts in a CAS repository as specified in the
 # Remote Execution API.
 #
@@ -854,6 +859,80 @@ class CASCache(ArtifactCache):
 
         assert digest.size_bytes == os.fstat(stream.fileno()).st_size
 
+    # _ensure_blob():
+    #
+    # Fetch and add blob if it's not already local.
+    #
+    # Args:
+    #     remote (Remote): The remote to use.
+    #     digest (Digest): Digest object for the blob to fetch.
+    #
+    # Returns:
+    #     (str): The path of the object
+    #
+    def _ensure_blob(self, remote, digest):
+        objpath = self.objpath(digest)
+        if os.path.exists(objpath):
+            # already in local repository
+            return objpath
+
+        with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
+            self._fetch_blob(remote, digest, f)
+
+            added_digest = self.add_object(path=f.name)
+            assert added_digest.hash == digest.hash
+
+        return objpath
+
+    def _batch_download_complete(self, batch):
+        for digest, data in batch.send():
+            with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
+                f.write(data)
+                f.flush()
+
+                added_digest = self.add_object(path=f.name)
+                assert added_digest.hash == digest.hash
+
+    # Helper function for _fetch_directory().
+    def _fetch_directory_batch(self, remote, batch, fetch_queue, fetch_next_queue):
+        self._batch_download_complete(batch)
+
+        # All previously scheduled directories are now locally available,
+        # move them to the processing queue.
+        fetch_queue.extend(fetch_next_queue)
+        fetch_next_queue.clear()
+        return _CASBatchRead(remote)
+
+    # Helper function for _fetch_directory().
+    def _fetch_directory_node(self, remote, digest, batch, fetch_queue, fetch_next_queue, *, recursive=False):
+        in_local_cache = os.path.exists(self.objpath(digest))
+
+        if in_local_cache:
+            # Skip download, already in local cache.
+            pass
+        elif (digest.size_bytes >= remote.max_batch_total_size_bytes or
+                not remote.batch_read_supported):
+            # Too large for batch request, download in independent request.
+            self._ensure_blob(remote, digest)
+            in_local_cache = True
+        else:
+            if not batch.add(digest):
+                # Not enough space left in batch request.
+                # Complete pending batch first.
+                batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
+                batch.add(digest)
+
+        if recursive:
+            if in_local_cache:
+                # Add directory to processing queue.
+                fetch_queue.append(digest)
+            else:
+                # Directory will be available after completing pending batch.
+                # Add directory to deferred processing queue.
+                fetch_next_queue.append(digest)
+
+        return batch
+
     # _fetch_directory():
     #
     # Fetches remote directory and adds it to content addressable store.
@@ -867,39 +946,32 @@ class CASCache(ArtifactCache):
     #     dir_digest (Digest): Digest object for the directory to fetch.
     #
     def _fetch_directory(self, remote, dir_digest):
-        objpath = self.objpath(dir_digest)
-        if os.path.exists(objpath):
-            # already in local cache
-            return
-
-        with tempfile.NamedTemporaryFile(dir=self.tmpdir) as out:
-            self._fetch_blob(remote, dir_digest, out)
-
-            directory = remote_execution_pb2.Directory()
+        fetch_queue = [dir_digest]
+        fetch_next_queue = []
+        batch = _CASBatchRead(remote)
 
-            with open(out.name, 'rb') as f:
-                directory.ParseFromString(f.read())
+        while len(fetch_queue) + len(fetch_next_queue) > 0:
+            if len(fetch_queue) == 0:
+                batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
 
-            for filenode in directory.files:
-                fileobjpath = self.objpath(filenode.digest)
-                if os.path.exists(fileobjpath):
-                    # already in local cache
-                    continue
+            dir_digest = fetch_queue.pop(0)
 
-                with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
-                    self._fetch_blob(remote, filenode.digest, f)
+            objpath = self._ensure_blob(remote, dir_digest)
 
-                    digest = self.add_object(path=f.name)
-                    assert digest.hash == filenode.digest.hash
+            directory = remote_execution_pb2.Directory()
+            with open(objpath, 'rb') as f:
+                directory.ParseFromString(f.read())
 
             for dirnode in directory.directories:
-                self._fetch_directory(remote, dirnode.digest)
+                batch = self._fetch_directory_node(remote, dirnode.digest, batch,
+                                                   fetch_queue, fetch_next_queue, recursive=True)
+
+            for filenode in directory.files:
+                batch = self._fetch_directory_node(remote, filenode.digest, batch,
+                                                   fetch_queue, fetch_next_queue)
 
-            # Place directory blob only in final location when we've
-            # downloaded all referenced blobs to avoid dangling
-            # references in the repository.
-            digest = self.add_object(path=out.name)
-            assert digest.hash == dir_digest.hash
+        # Fetch final batch
+        self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
 
     def _fetch_tree(self, remote, digest):
         # download but do not store the Tree object
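For orientation, here is a minimal, runnable model of the queueing scheme above. It is not BuildStream code; the names, the item-count limit standing in for max_batch_total_size_bytes, and the fake in-memory "download" are all made up. The point it illustrates: directories are parsed as soon as their blob is local, small blobs are grouped into size-limited batches, and directories whose blobs sit in a pending batch wait on a deferred queue.

MAX_BATCH_ITEMS = 2  # stand-in for max_batch_total_size_bytes


def fetch_tree(tree, root):
    """tree maps a directory name to (subdirs, files); returns the set of
    blobs that were 'downloaded'."""
    local = set()
    fetch_queue, fetch_next_queue, batch = [root], [], []

    def complete_batch(batch):
        local.update(batch)                    # one BatchReadBlobs() round trip
        fetch_queue.extend(fetch_next_queue)   # deferred directories are now parseable
        fetch_next_queue.clear()
        return []

    def schedule(name, batch, directory=False):
        if name in local:
            if directory:
                fetch_queue.append(name)       # already local, parse right away
            return batch
        if len(batch) >= MAX_BATCH_ITEMS:      # batch full: complete it first
            batch = complete_batch(batch)
        batch.append(name)
        if directory:
            fetch_next_queue.append(name)      # parse once its batch completes
        return batch

    while fetch_queue or fetch_next_queue:
        if not fetch_queue:
            batch = complete_batch(batch)
        name = fetch_queue.pop(0)
        local.add(name)                        # models _ensure_blob() on the directory itself
        subdirs, files = tree[name]
        for d in subdirs:
            batch = schedule(d, batch, directory=True)
        for f in files:
            batch = schedule(f, batch)
    complete_batch(batch)                      # fetch final batch
    return local


print(sorted(fetch_tree({"root": (["sub"], ["a", "b"]), "sub": ([], ["c"])}, "root")))
# -> ['a', 'b', 'c', 'root', 'sub']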
@@ -914,16 +986,7 @@ class CASCache(ArtifactCache):
         tree.children.extend([tree.root])
         for directory in tree.children:
             for filenode in directory.files:
-                fileobjpath = self.objpath(filenode.digest)
-                if os.path.exists(fileobjpath):
-                    # already in local cache
-                    continue
-
-                with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
-                    self._fetch_blob(remote, filenode.digest, f)
-
-                    added_digest = self.add_object(path=f.name)
-                    assert added_digest.hash == filenode.digest.hash
+                self._ensure_blob(remote, filenode.digest)
 
         # place directory blob only in final location when we've downloaded
         # all referenced blobs to avoid dangling references in the repository
@@ -942,12 +1005,12 @@ class CASCache(ArtifactCache):
         finished = False
         remaining = digest.size_bytes
         while not finished:
-            chunk_size = min(remaining, 64 * 1024)
+            chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
             remaining -= chunk_size
 
             request = bytestream_pb2.WriteRequest()
             request.write_offset = offset
-            # max. 64 kB chunks
+            # max. _MAX_PAYLOAD_BYTES chunks
             request.data = instream.read(chunk_size)
             request.resource_name = resname
             request.finish_write = remaining <= 0
@@ -1035,11 +1098,78 @@ class _CASRemote():
 
             self.bytestream = bytestream_pb2_grpc.ByteStreamStub(self.channel)
             self.cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(self.channel)
+            self.capabilities = remote_execution_pb2_grpc.CapabilitiesStub(self.channel)
             self.ref_storage = buildstream_pb2_grpc.ReferenceStorageStub(self.channel)
 
+            self.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
+            try:
+                request = remote_execution_pb2.GetCapabilitiesRequest()
+                response = self.capabilities.GetCapabilities(request)
+                server_max_batch_total_size_bytes = response.cache_capabilities.max_batch_total_size_bytes
+                if 0 < server_max_batch_total_size_bytes < self.max_batch_total_size_bytes:
+                    self.max_batch_total_size_bytes = server_max_batch_total_size_bytes
+            except grpc.RpcError as e:
+                # Simply use the defaults for servers that don't implement GetCapabilities()
+                if e.code() != grpc.StatusCode.UNIMPLEMENTED:
+                    raise
+
+            # Check whether the server supports BatchReadBlobs()
+            self.batch_read_supported = False
+            try:
+                request = remote_execution_pb2.BatchReadBlobsRequest()
+                response = self.cas.BatchReadBlobs(request)
+                self.batch_read_supported = True
+            except grpc.RpcError as e:
+                if e.code() != grpc.StatusCode.UNIMPLEMENTED:
+                    raise
+
             self._initialized = True
 
 
+# Represents a batch of blobs queued for fetching.
+#
+class _CASBatchRead():
+    def __init__(self, remote):
+        self._remote = remote
+        self._max_total_size_bytes = remote.max_batch_total_size_bytes
+        self._request = remote_execution_pb2.BatchReadBlobsRequest()
+        self._size = 0
+        self._sent = False
+
+    def add(self, digest):
+        assert not self._sent
+
+        new_batch_size = self._size + digest.size_bytes
+        if new_batch_size > self._max_total_size_bytes:
+            # Not enough space left in current batch
+            return False
+
+        request_digest = self._request.digests.add()
+        request_digest.hash = digest.hash
+        request_digest.size_bytes = digest.size_bytes
+        self._size = new_batch_size
+        return True
+
+    def send(self):
+        assert not self._sent
+        self._sent = True
+
+        if len(self._request.digests) == 0:
+            return
+
+        batch_response = self._remote.cas.BatchReadBlobs(self._request)
+
+        for response in batch_response.responses:
+            if response.status.code != grpc.StatusCode.OK.value[0]:
+                raise ArtifactError("Failed to download blob {}: {}".format(
+                    response.digest.hash, response.status.code))
+            if response.digest.size_bytes != len(response.data):
+                raise ArtifactError("Failed to download blob {}: expected {} bytes, received {} bytes".format(
+                    response.digest.hash, response.digest.size_bytes, len(response.data)))
+
+            yield (response.digest, response.data)
+
+
 def _grouper(iterable, n):
     while True:
         try:
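As a quick orientation, a hypothetical caller of the new _CASBatchRead helper would drive it roughly as below. The names download_blobs, digests and store_blob are made up for illustration; the real call sites are _fetch_directory_node() and _batch_download_complete() above, and the sketch assumes every digest is individually small enough for batching.

def download_blobs(remote, digests, store_blob):
    batch = _CASBatchRead(remote)
    for digest in digests:
        if not batch.add(digest):
            # Adding this digest would exceed max_batch_total_size_bytes:
            # complete the pending batch, then start a new one with it.
            for received_digest, data in batch.send():
                store_blob(received_digest, data)
            batch = _CASBatchRead(remote)
            batch.add(digest)
    # Send whatever is left; send() yields nothing for an empty batch.
    for received_digest, data in batch.send():
        store_blob(received_digest, data)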
buildstream/_artifactcache/casserver.py
@@ -38,8 +38,9 @@ from .._context import Context
 from .cascache import CASCache
 
 
-# The default limit for gRPC messages is 4 MiB
-_MAX_BATCH_TOTAL_SIZE_BYTES = 4 * 1024 * 1024
+# The default limit for gRPC messages is 4 MiB.
+# Limit payload to 1 MiB to leave sufficient headroom for metadata.
+_MAX_PAYLOAD_BYTES = 1024 * 1024
 
 
 # Trying to push an artifact that is too large
@@ -158,7 +159,7 @@ class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
 
                 remaining = client_digest.size_bytes - request.read_offset
                 while remaining > 0:
-                    chunk_size = min(remaining, 64 * 1024)
+                    chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
                     remaining -= chunk_size
 
                     response = bytestream_pb2.ReadResponse()
@@ -242,7 +243,7 @@ class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddressableStorageServicer):
 
         for digest in request.digests:
             batch_size += digest.size_bytes
-            if batch_size > _MAX_BATCH_TOTAL_SIZE_BYTES:
+            if batch_size > _MAX_PAYLOAD_BYTES:
                 context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
                 return response
 
@@ -269,7 +270,7 @@ class _CapabilitiesServicer(remote_execution_pb2_grpc.CapabilitiesServicer):
         cache_capabilities = response.cache_capabilities
         cache_capabilities.digest_function.append(remote_execution_pb2.SHA256)
         cache_capabilities.action_cache_update_capabilities.update_enabled = False
-        cache_capabilities.max_batch_total_size_bytes = _MAX_BATCH_TOTAL_SIZE_BYTES
+        cache_capabilities.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
         cache_capabilities.symlink_absolute_path_strategy = remote_execution_pb2.CacheCapabilities.ALLOWED
 
         response.deprecated_api_version.major = 2
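A side note on the limits (my reading, not part of the patch): the 1 MiB payload cap is chosen so that a full batch plus its per-blob metadata stays safely under gRPC's default message size limit of roughly 4 MiB. The alternative of raising the per-channel limits, which this patch deliberately avoids, would look something like this in Python gRPC (endpoint is hypothetical; the option names are standard gRPC channel arguments):

import grpc

channel = grpc.insecure_channel(
    'cache.example.com:11001',
    options=[('grpc.max_send_message_length', 8 * 1024 * 1024),
             ('grpc.max_receive_message_length', 8 * 1024 * 1024)])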
buildstream/_protos/google/rpc/code.proto (new file)
@@ -0,0 +1,186 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.rpc;
+
+option go_package = "google.golang.org/genproto/googleapis/rpc/code;code";
+option java_multiple_files = true;
+option java_outer_classname = "CodeProto";
+option java_package = "com.google.rpc";
+option objc_class_prefix = "RPC";
+
+
+// The canonical error codes for Google APIs.
+//
+//
+// Sometimes multiple error codes may apply. Services should return
+// the most specific error code that applies. For example, prefer
+// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply.
+// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`.
+enum Code {
+  // Not an error; returned on success
+  //
+  // HTTP Mapping: 200 OK
+  OK = 0;
+
+  // The operation was cancelled, typically by the caller.
+  //
+  // HTTP Mapping: 499 Client Closed Request
+  CANCELLED = 1;
+
+  // Unknown error. For example, this error may be returned when
+  // a `Status` value received from another address space belongs to
+  // an error space that is not known in this address space. Also
+  // errors raised by APIs that do not return enough error information
+  // may be converted to this error.
+  //
+  // HTTP Mapping: 500 Internal Server Error
+  UNKNOWN = 2;
+
+  // The client specified an invalid argument. Note that this differs
+  // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments
+  // that are problematic regardless of the state of the system
+  // (e.g., a malformed file name).
+  //
+  // HTTP Mapping: 400 Bad Request
+  INVALID_ARGUMENT = 3;
+
+  // The deadline expired before the operation could complete. For operations
+  // that change the state of the system, this error may be returned
+  // even if the operation has completed successfully. For example, a
+  // successful response from a server could have been delayed long
+  // enough for the deadline to expire.
+  //
+  // HTTP Mapping: 504 Gateway Timeout
+  DEADLINE_EXCEEDED = 4;
+
+  // Some requested entity (e.g., file or directory) was not found.
+  //
+  // Note to server developers: if a request is denied for an entire class
+  // of users, such as gradual feature rollout or undocumented whitelist,
+  // `NOT_FOUND` may be used. If a request is denied for some users within
+  // a class of users, such as user-based access control, `PERMISSION_DENIED`
+  // must be used.
+  //
+  // HTTP Mapping: 404 Not Found
+  NOT_FOUND = 5;
+
+  // The entity that a client attempted to create (e.g., file or directory)
+  // already exists.
+  //
+  // HTTP Mapping: 409 Conflict
+  ALREADY_EXISTS = 6;
+
+  // The caller does not have permission to execute the specified
+  // operation. `PERMISSION_DENIED` must not be used for rejections
+  // caused by exhausting some resource (use `RESOURCE_EXHAUSTED`
+  // instead for those errors). `PERMISSION_DENIED` must not be
+  // used if the caller can not be identified (use `UNAUTHENTICATED`
+  // instead for those errors). This error code does not imply the
+  // request is valid or the requested entity exists or satisfies
+  // other pre-conditions.
+  //
+  // HTTP Mapping: 403 Forbidden
+  PERMISSION_DENIED = 7;
+
+  // The request does not have valid authentication credentials for the
+  // operation.
+  //
+  // HTTP Mapping: 401 Unauthorized
+  UNAUTHENTICATED = 16;
+
+  // Some resource has been exhausted, perhaps a per-user quota, or
+  // perhaps the entire file system is out of space.
+  //
+  // HTTP Mapping: 429 Too Many Requests
+  RESOURCE_EXHAUSTED = 8;
+
+  // The operation was rejected because the system is not in a state
+  // required for the operation's execution. For example, the directory
+  // to be deleted is non-empty, an rmdir operation is applied to
+  // a non-directory, etc.
+  //
+  // Service implementors can use the following guidelines to decide
+  // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:
+  //  (a) Use `UNAVAILABLE` if the client can retry just the failing call.
+  //  (b) Use `ABORTED` if the client should retry at a higher level
+  //      (e.g., when a client-specified test-and-set fails, indicating the
+  //      client should restart a read-modify-write sequence).
+  //  (c) Use `FAILED_PRECONDITION` if the client should not retry until
+  //      the system state has been explicitly fixed. E.g., if an "rmdir"
+  //      fails because the directory is non-empty, `FAILED_PRECONDITION`
+  //      should be returned since the client should not retry unless
+  //      the files are deleted from the directory.
+  //
+  // HTTP Mapping: 400 Bad Request
+  FAILED_PRECONDITION = 9;
+
+  // The operation was aborted, typically due to a concurrency issue such as
+  // a sequencer check failure or transaction abort.
+  //
+  // See the guidelines above for deciding between `FAILED_PRECONDITION`,
+  // `ABORTED`, and `UNAVAILABLE`.
+  //
+  // HTTP Mapping: 409 Conflict
+  ABORTED = 10;
+
+  // The operation was attempted past the valid range. E.g., seeking or
+  // reading past end-of-file.
+  //
+  // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may
+  // be fixed if the system state changes. For example, a 32-bit file
+  // system will generate `INVALID_ARGUMENT` if asked to read at an
+  // offset that is not in the range [0,2^32-1], but it will generate
+  // `OUT_OF_RANGE` if asked to read from an offset past the current
+  // file size.
+  //
+  // There is a fair bit of overlap between `FAILED_PRECONDITION` and
+  // `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific
+  // error) when it applies so that callers who are iterating through
+  // a space can easily look for an `OUT_OF_RANGE` error to detect when
+  // they are done.
+  //
+  // HTTP Mapping: 400 Bad Request
+  OUT_OF_RANGE = 11;
+
+  // The operation is not implemented or is not supported/enabled in this
+  // service.
+  //
+  // HTTP Mapping: 501 Not Implemented
+  UNIMPLEMENTED = 12;
+
+  // Internal errors. This means that some invariants expected by the
+  // underlying system have been broken. This error code is reserved
+  // for serious errors.
+  //
+  // HTTP Mapping: 500 Internal Server Error
+  INTERNAL = 13;
+
+  // The service is currently unavailable. This is most likely a
+  // transient condition, which can be corrected by retrying with
+  // a backoff.
+  //
+  // See the guidelines above for deciding between `FAILED_PRECONDITION`,
+  // `ABORTED`, and `UNAVAILABLE`.
+  //
+  // HTTP Mapping: 503 Service Unavailable
+  UNAVAILABLE = 14;
+
+  // Unrecoverable data loss or corruption.
+  //
+  // HTTP Mapping: 500 Internal Server Error
+  DATA_LOSS = 15;
+}
\ No newline at end of file
buildstream/_protos/google/rpc/code_pb2.py (new file)
@@ -0,0 +1,133 @@
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/rpc/code.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf.internal import enum_type_wrapper
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='google/rpc/code.proto',
+  package='google.rpc',
+  syntax='proto3',
+  serialized_options=_b('\n\016com.google.rpcB\tCodeProtoP\001Z3google.golang.org/genproto/googleapis/rpc/code;code\242\002\003RPC'),
+  serialized_pb=_b('\n\x15google/rpc/code.proto\x12\ngoogle.rpc*\xb7\x02\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\r\n\tCANCELLED\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x14\n\x10INVALID_ARGUMENT\x10\x03\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x04\x12\r\n\tNOT_FOUND\x10\x05\x12\x12\n\x0e\x41LREADY_EXISTS\x10\x06\x12\x15\n\x11PERMISSION_DENIED\x10\x07\x12\x13\n\x0fUNAUTHENTICATED\x10\x10\x12\x16\n\x12RESOURCE_EXHAUSTED\x10\x08\x12\x17\n\x13\x46\x41ILED_PRECONDITION\x10\t\x12\x0b\n\x07\x41\x42ORTED\x10\n\x12\x10\n\x0cOUT_OF_RANGE\x10\x0b\x12\x11\n\rUNIMPLEMENTED\x10\x0c\x12\x0c\n\x08INTERNAL\x10\r\x12\x0f\n\x0bUNAVAILABLE\x10\x0e\x12\r\n\tDATA_LOSS\x10\x0f\x42X\n\x0e\x63om.google.rpcB\tCodeProtoP\x01Z3google.golang.org/genproto/googleapis/rpc/code;code\xa2\x02\x03RPCb\x06proto3')
+)
+
+_CODE = _descriptor.EnumDescriptor(
+  name='Code',
+  full_name='google.rpc.Code',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='OK', index=0, number=0,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='CANCELLED', index=1, number=1,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='UNKNOWN', index=2, number=2,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='INVALID_ARGUMENT', index=3, number=3,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='DEADLINE_EXCEEDED', index=4, number=4,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='NOT_FOUND', index=5, number=5,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='ALREADY_EXISTS', index=6, number=6,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='PERMISSION_DENIED', index=7, number=7,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='UNAUTHENTICATED', index=8, number=16,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='RESOURCE_EXHAUSTED', index=9, number=8,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='FAILED_PRECONDITION', index=10, number=9,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='ABORTED', index=11, number=10,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='OUT_OF_RANGE', index=12, number=11,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='UNIMPLEMENTED', index=13, number=12,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='INTERNAL', index=14, number=13,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='UNAVAILABLE', index=15, number=14,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='DATA_LOSS', index=16, number=15,
+      serialized_options=None,
+      type=None),
+  ],
+  containing_type=None,
+  serialized_options=None,
+  serialized_start=38,
+  serialized_end=349,
+)
+_sym_db.RegisterEnumDescriptor(_CODE)
+
+Code = enum_type_wrapper.EnumTypeWrapper(_CODE)
+OK = 0
+CANCELLED = 1
+UNKNOWN = 2
+INVALID_ARGUMENT = 3
+DEADLINE_EXCEEDED = 4
+NOT_FOUND = 5
+ALREADY_EXISTS = 6
+PERMISSION_DENIED = 7
+UNAUTHENTICATED = 16
+RESOURCE_EXHAUSTED = 8
+FAILED_PRECONDITION = 9
+ABORTED = 10
+OUT_OF_RANGE = 11
+UNIMPLEMENTED = 12
+INTERNAL = 13
+UNAVAILABLE = 14
+DATA_LOSS = 15
+
+
+DESCRIPTOR.enum_types_by_name['Code'] = _CODE
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+
+DESCRIPTOR._options = None
+# @@protoc_insertion_point(module_scope)
buildstream/_protos/google/rpc/code_pb2_grpc.py (new file)
@@ -0,0 +1,3 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
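The generated code_pb2 module exposes the google.rpc status codes both as module-level constants and through an enum wrapper. A small illustration of how it can be used (assuming the package is importable under the path below):

from buildstream._protos.google.rpc import code_pb2

assert code_pb2.OK == 0
print(code_pb2.Code.Name(code_pb2.UNAVAILABLE))   # "UNAVAILABLE"
print(code_pb2.Code.Value("NOT_FOUND"))           # 5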
buildstream/sandbox/_sandboxremote.py
@@ -27,6 +27,7 @@ from . import Sandbox
 from ..storage._filebaseddirectory import FileBasedDirectory
 from ..storage._casbaseddirectory import CasBasedDirectory
 from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
+from .._protos.google.rpc import code_pb2
 from .._platform import Platform
 
 
@@ -207,15 +208,23 @@ class SandboxRemote(Sandbox):
 
         operation.response.Unpack(execution_response)
 
-        if execution_response.status.code != 0:
-            # A normal error during the build: the remote execution system
-            # has worked correctly but the command failed.
-            # execution_response.error also contains 'message' (str) and
-            # 'details' (iterator of Any) which we ignore at the moment.
-            return execution_response.status.code
+        if execution_response.status.code != code_pb2.OK:
+            # An unexpected error during execution: the remote execution
+            # system failed at processing the execution request.
+            if execution_response.status.message:
+                raise SandboxError(execution_response.status.message)
+            else:
+                raise SandboxError("Remote server failed at executing the build request.")
 
         action_result = execution_response.result
 
+        if action_result.exit_code != 0:
+            # A normal error during the build: the remote execution system
+            # has worked correctly but the command failed.
+            # action_result.stdout and action_result.stderr also contains
+            # build command outputs which we ignore at the moment.
+            return action_result.exit_code
+
         self.process_job_output(action_result.output_directories, action_result.output_files)
 
         return 0
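In short, the sandbox now distinguishes two failure levels when interpreting an ExecuteResponse. A condensed sketch of that logic, using the same fields as the patch above but wrapped in a hypothetical helper:

def interpret_execute_response(execution_response):
    # Level 1: the remote execution service itself failed (infrastructure error).
    if execution_response.status.code != code_pb2.OK:
        raise SandboxError(execution_response.status.message or
                           "Remote server failed at executing the build request.")

    # Level 2: the service worked, but the build command returned non-zero;
    # that exit code is simply propagated to the caller.
    return execution_response.result.exit_code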