Jürg Billeter pushed to branch juerg/git-describe at BuildStream / buildstream
Commits:
- da7e038b by Jürg Billeter at 2018-09-24T13:58:55Z
- 86ea1173 by Jürg Billeter at 2018-09-24T13:59:59Z
- a76339de by Jürg Billeter at 2018-09-24T16:02:38Z
- b199afe6 by Jürg Billeter at 2018-09-25T09:01:51Z
- 7d199322 by Jürg Billeter at 2018-09-25T09:01:51Z
- e2e24015 by Jürg Billeter at 2018-09-25T09:01:51Z
- 697d10f2 by Jürg Billeter at 2018-09-25T09:01:51Z
- 81c51dbf by Jürg Billeter at 2018-09-25T09:31:55Z
- 2df7d140 by Jim MacArthur at 2018-09-25T10:36:37Z
- 62f59382 by Jim MacArthur at 2018-09-25T10:36:37Z
- 8cea7b17 by Jim MacArthur at 2018-09-25T10:58:40Z
- 83ab183e by Tiago Gomes at 2018-09-25T16:37:30Z
- c8594806 by Tiago Gomes at 2018-09-25T17:03:55Z
- fdbf9be1 by Tiago Gomes at 2018-09-26T08:17:46Z
- 9280b0cf by Tiago Gomes at 2018-09-26T15:54:53Z
- af131503 by Tiago Gomes at 2018-09-26T16:35:58Z
- f7f14f25 by Tiago Gomes at 2018-09-26T17:42:12Z
- 0f2bc375 by Martin Blanchard at 2018-09-27T08:46:27Z
- 16cf9d5f by Martin Blanchard at 2018-09-27T08:46:27Z
- f5f3cb7c by Jim MacArthur at 2018-09-27T09:16:38Z
- b99a6740 by Jürg Billeter at 2018-09-27T09:18:03Z
- f8bbe008 by Jürg Billeter at 2018-09-27T09:18:03Z
- f9494f1f by Jürg Billeter at 2018-09-27T09:18:03Z
- b4ad84c3 by Jürg Billeter at 2018-09-27T09:18:03Z
- 966af3d2 by Jürg Billeter at 2018-09-27T09:18:03Z
- f08e5eae by Jürg Billeter at 2018-09-27T09:18:03Z
- 10ed9158 by Jürg Billeter at 2018-09-27T09:18:03Z
- 7aec8b0f by Jürg Billeter at 2018-09-27T09:18:03Z
- a5025c33 by Jürg Billeter at 2018-09-27T09:18:03Z
- ef1cb374 by Jürg Billeter at 2018-09-27T09:18:03Z
- 52991be1 by Jürg Billeter at 2018-09-27T09:18:03Z
- cb6b35dc by Jürg Billeter at 2018-09-27T09:18:42Z
- 28c62b00 by Jürg Billeter at 2018-09-27T09:18:43Z
- 99e90918 by Jürg Billeter at 2018-09-27T09:19:12Z
- b8f920e2 by Jürg Billeter at 2018-09-27T09:19:12Z
- 3d88c5d4 by Jürg Billeter at 2018-09-27T09:19:12Z
- ed878cb6 by Jürg Billeter at 2018-09-27T09:19:12Z
- fe82c9c2 by Jürg Billeter at 2018-09-27T09:19:12Z
- 0d0f700d by Jürg Billeter at 2018-09-27T09:19:12Z
- dd770ec3 by Jürg Billeter at 2018-09-27T09:19:12Z
- 44da8175 by Jürg Billeter at 2018-09-27T09:48:20Z
- 1378907a by Jürg Billeter at 2018-09-27T10:08:01Z
- 9ea7cf8b by Jürg Billeter at 2018-09-27T10:08:01Z
- c283d5f8 by Jürg Billeter at 2018-09-27T10:15:21Z
30 changed files:
- .gitlab-ci.yml
- buildstream/_artifactcache/artifactcache.py
- buildstream/_artifactcache/cascache.py
- buildstream/_artifactcache/casserver.py
- buildstream/_context.py
- buildstream/_frontend/app.py
- buildstream/_loader/loader.py
- buildstream/_platform/linux.py
- buildstream/_platform/platform.py
- buildstream/_platform/unix.py
- + buildstream/_protos/google/rpc/code.proto
- + buildstream/_protos/google/rpc/code_pb2.py
- + buildstream/_protos/google/rpc/code_pb2_grpc.py
- buildstream/_scheduler/jobs/cachesizejob.py
- buildstream/_scheduler/jobs/cleanupjob.py
- buildstream/_scheduler/queues/buildqueue.py
- buildstream/_scheduler/scheduler.py
- buildstream/_stream.py
- buildstream/element.py
- buildstream/plugins/sources/git.py
- buildstream/sandbox/_sandboxremote.py
- buildstream/sandbox/sandbox.py
- buildstream/storage/_casbaseddirectory.py
- tests/artifactcache/pull.py
- tests/artifactcache/push.py
- + tests/frontend/project/elements/rebuild-target.bst
- + tests/frontend/rebuild.py
- tests/sources/git.py
- tests/testutils/artifactshare.py
- tests/testutils/repo/git.py
Changes:
... | ... | @@ -79,6 +79,8 @@ source_dist: |
79 | 79 |
- cd ../..
|
80 | 80 |
- mkdir -p coverage-linux/
|
81 | 81 |
- cp dist/buildstream/.coverage.* coverage-linux/coverage."${CI_JOB_NAME}"
|
82 |
+ except:
|
|
83 |
+ - schedules
|
|
82 | 84 |
artifacts:
|
83 | 85 |
paths:
|
84 | 86 |
- coverage-linux/
|
... | ... | @@ -127,6 +129,8 @@ tests-unix: |
127 | 129 |
- cd ../..
|
128 | 130 |
- mkdir -p coverage-unix/
|
129 | 131 |
- cp dist/buildstream/.coverage.* coverage-unix/coverage.unix
|
132 |
+ except:
|
|
133 |
+ - schedules
|
|
130 | 134 |
artifacts:
|
131 | 135 |
paths:
|
132 | 136 |
- coverage-unix/
|
... | ... | @@ -148,10 +152,41 @@ docs: |
148 | 152 |
- make BST_FORCE_SESSION_REBUILD=1 -C doc
|
149 | 153 |
- cd ../..
|
150 | 154 |
- mv dist/buildstream/doc/build/html public
|
155 |
+ except:
|
|
156 |
+ - schedules
|
|
151 | 157 |
artifacts:
|
152 | 158 |
paths:
|
153 | 159 |
- public/
|
154 | 160 |
|
161 |
+.overnight-tests: &overnight-tests-template
|
|
162 |
+ stage: test
|
|
163 |
+ variables:
|
|
164 |
+ bst_ext_url: git+https://gitlab.com/BuildStream/bst-external.git
|
|
165 |
+ bst_ext_ref: 1d6ab71151b93c8cbc0a91a36ffe9270f3b835f1 # 0.5.1
|
|
166 |
+ fd_sdk_ref: 88d7c22c2281b987faa02edd57df80d430eecf1f # 18.08.12
|
|
167 |
+ before_script:
|
|
168 |
+ - (cd dist && ./unpack.sh && cd buildstream && pip3 install .)
|
|
169 |
+ - pip3 install --user -e ${bst_ext_url}@${bst_ext_ref}#egg=bst_ext
|
|
170 |
+ - git clone https://gitlab.com/freedesktop-sdk/freedesktop-sdk.git
|
|
171 |
+ - git -C freedesktop-sdk checkout ${fd_sdk_ref}
|
|
172 |
+ only:
|
|
173 |
+ - schedules
|
|
174 |
+ |
|
175 |
+overnight-tests:
|
|
176 |
+ <<: *overnight-tests-template
|
|
177 |
+ script:
|
|
178 |
+ - make -C freedesktop-sdk
|
|
179 |
+ tags:
|
|
180 |
+ - overnight-tests
|
|
181 |
+ |
|
182 |
+overnight-tests-no-cache:
|
|
183 |
+ <<: *overnight-tests-template
|
|
184 |
+ script:
|
|
185 |
+ - sed -i '/artifacts:/,+1 d' freedesktop-sdk/bootstrap/project.conf
|
|
186 |
+ - sed -i '/artifacts:/,+1 d' freedesktop-sdk/project.conf
|
|
187 |
+ - make -C freedesktop-sdk
|
|
188 |
+ tags:
|
|
189 |
+ - overnight-tests
|
|
155 | 190 |
|
156 | 191 |
# Check code quality with gitlab's built-in feature.
|
157 | 192 |
#
|
... | ... | @@ -170,6 +205,8 @@ code_quality: |
170 | 205 |
--volume "$PWD":/code
|
171 | 206 |
--volume /var/run/docker.sock:/var/run/docker.sock
|
172 | 207 |
"registry.gitlab.com/gitlab-org/security-products/codequality:$SP_VERSION" /code
|
208 |
+ except:
|
|
209 |
+ - schedules
|
|
173 | 210 |
artifacts:
|
174 | 211 |
paths: [gl-code-quality-report.json]
|
175 | 212 |
|
... | ... | @@ -199,6 +236,8 @@ analysis: |
199 | 236 |
radon raw -s -j buildstream > analysis/raw.json
|
200 | 237 |
radon raw -s buildstream
|
201 | 238 |
|
239 |
+ except:
|
|
240 |
+ - schedules
|
|
202 | 241 |
artifacts:
|
203 | 242 |
paths:
|
204 | 243 |
- analysis/
|
... | ... | @@ -224,6 +263,8 @@ coverage: |
224 | 263 |
- tests-fedora-28
|
225 | 264 |
- tests-unix
|
226 | 265 |
- source_dist
|
266 |
+ except:
|
|
267 |
+ - schedules
|
|
227 | 268 |
|
228 | 269 |
# Deploy, only for merges which land on master branch.
|
229 | 270 |
#
|
... | ... | @@ -232,8 +273,14 @@ pages: |
232 | 273 |
dependencies:
|
233 | 274 |
- source_dist
|
234 | 275 |
- docs
|
276 |
+ variables:
|
|
277 |
+ ACME_DIR: public/.well-known/acme-challenge
|
|
235 | 278 |
script:
|
236 |
- - find public/
|
|
279 |
+ - mkdir -p ${ACME_DIR}
|
|
280 |
+ # Required to finish the creation of the Let's Encrypt certificate,
|
|
281 |
+ # which allows using https://docs.buildstream.build/ for accessing
|
|
282 |
+ # the documentation.
|
|
283 |
+ - echo ${ACME_CHALLENGE} > ${ACME_DIR}/$(echo ${ACME_CHALLENGE} | cut -c1-43)
|
|
237 | 284 |
artifacts:
|
238 | 285 |
paths:
|
239 | 286 |
- public/
|
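The `pages` job above now publishes a Let's Encrypt HTTP-01 challenge response: the key authorization held in `${ACME_CHALLENGE}` is written to a file whose name is the first 43 characters of that string, which for typical ACME tokens is the token part before the dot. A minimal Python sketch of the same derivation; `write_acme_challenge`, `public_dir` and `key_authorization` are illustrative names, not part of the commits above:

```python
import os

def write_acme_challenge(public_dir, key_authorization):
    """Write an HTTP-01 challenge response under .well-known/acme-challenge.

    The file name is the challenge token (the part before the '.',
    commonly 43 base64url characters, which is what `cut -c1-43` relies on);
    the file content is the full key authorization string.
    """
    acme_dir = os.path.join(public_dir, ".well-known", "acme-challenge")
    os.makedirs(acme_dir, exist_ok=True)

    token = key_authorization.split(".", 1)[0]
    with open(os.path.join(acme_dir, token), "w") as f:
        f.write(key_authorization)
```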
... | ... | @@ -248,3 +295,5 @@ pages: |
248 | 295 |
# See https://gitlab.com/gitlab-org/gitlab-ce/issues/35141
|
249 | 296 |
#
|
250 | 297 |
- master
|
298 |
+ except:
|
|
299 |
+ - schedules
|
... | ... | @@ -383,6 +383,13 @@ class ArtifactCache(): |
383 | 383 |
# Abstract methods for subclasses to implement #
|
384 | 384 |
################################################
|
385 | 385 |
|
386 |
+ # preflight():
|
|
387 |
+ #
|
|
388 |
+ # Preflight check.
|
|
389 |
+ #
|
|
390 |
+ def preflight(self):
|
|
391 |
+ pass
|
|
392 |
+ |
|
386 | 393 |
# update_atime()
|
387 | 394 |
#
|
388 | 395 |
# Update the atime of an artifact.
|
... | ... | @@ -44,12 +44,16 @@ from .._exceptions import ArtifactError |
44 | 44 |
from . import ArtifactCache
|
45 | 45 |
|
46 | 46 |
|
47 |
+# The default limit for gRPC messages is 4 MiB.
|
|
48 |
+# Limit payload to 1 MiB to leave sufficient headroom for metadata.
|
|
49 |
+_MAX_PAYLOAD_BYTES = 1024 * 1024
|
|
50 |
+ |
|
51 |
+ |
|
47 | 52 |
# A CASCache manages artifacts in a CAS repository as specified in the
|
48 | 53 |
# Remote Execution API.
|
49 | 54 |
#
|
50 | 55 |
# Args:
|
51 | 56 |
# context (Context): The BuildStream context
|
52 |
-# enable_push (bool): Whether pushing is allowed by the platform
|
|
53 | 57 |
#
|
54 | 58 |
# Pushing is explicitly disabled by the platform in some cases,
|
55 | 59 |
# like when we are falling back to functioning without using
|
... | ... | @@ -57,7 +61,7 @@ from . import ArtifactCache |
57 | 61 |
#
|
58 | 62 |
class CASCache(ArtifactCache):
|
59 | 63 |
|
60 |
- def __init__(self, context, *, enable_push=True):
|
|
64 |
+ def __init__(self, context):
|
|
61 | 65 |
super().__init__(context)
|
62 | 66 |
|
63 | 67 |
self.casdir = os.path.join(context.artifactdir, 'cas')
|
... | ... | @@ -66,8 +70,6 @@ class CASCache(ArtifactCache): |
66 | 70 |
|
67 | 71 |
self._calculate_cache_quota()
|
68 | 72 |
|
69 |
- self._enable_push = enable_push
|
|
70 |
- |
|
71 | 73 |
# Per-project list of _CASRemote instances.
|
72 | 74 |
self._remotes = {}
|
73 | 75 |
|
... | ... | @@ -78,6 +80,12 @@ class CASCache(ArtifactCache): |
78 | 80 |
# Implementation of abstract methods #
|
79 | 81 |
################################################
|
80 | 82 |
|
83 |
+ def preflight(self):
|
|
84 |
+ if (not os.path.isdir(os.path.join(self.casdir, 'refs', 'heads')) or
|
|
85 |
+ not os.path.isdir(os.path.join(self.casdir, 'objects'))):
|
|
86 |
+ raise ArtifactError("CAS repository check failed for '{}'"
|
|
87 |
+ .format(self.casdir))
|
|
88 |
+ |
|
81 | 89 |
def contains(self, element, key):
|
82 | 90 |
refpath = self._refpath(self.get_artifact_fullname(element, key))
|
83 | 91 |
|
... | ... | @@ -209,7 +217,7 @@ class CASCache(ArtifactCache): |
209 | 217 |
return bool(remotes_for_project)
|
210 | 218 |
|
211 | 219 |
def has_push_remotes(self, *, element=None):
|
212 |
- if not self._has_push_remotes or not self._enable_push:
|
|
220 |
+ if not self._has_push_remotes:
|
|
213 | 221 |
# No project has push remotes
|
214 | 222 |
return False
|
215 | 223 |
elif element is None:
|
... | ... | @@ -854,6 +862,80 @@ class CASCache(ArtifactCache): |
854 | 862 |
|
855 | 863 |
assert digest.size_bytes == os.fstat(stream.fileno()).st_size
|
856 | 864 |
|
865 |
+ # _ensure_blob():
|
|
866 |
+ #
|
|
867 |
+ # Fetch and add blob if it's not already local.
|
|
868 |
+ #
|
|
869 |
+ # Args:
|
|
870 |
+ # remote (Remote): The remote to use.
|
|
871 |
+ # digest (Digest): Digest object for the blob to fetch.
|
|
872 |
+ #
|
|
873 |
+ # Returns:
|
|
874 |
+ # (str): The path of the object
|
|
875 |
+ #
|
|
876 |
+ def _ensure_blob(self, remote, digest):
|
|
877 |
+ objpath = self.objpath(digest)
|
|
878 |
+ if os.path.exists(objpath):
|
|
879 |
+ # already in local repository
|
|
880 |
+ return objpath
|
|
881 |
+ |
|
882 |
+ with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
|
|
883 |
+ self._fetch_blob(remote, digest, f)
|
|
884 |
+ |
|
885 |
+ added_digest = self.add_object(path=f.name)
|
|
886 |
+ assert added_digest.hash == digest.hash
|
|
887 |
+ |
|
888 |
+ return objpath
|
|
889 |
+ |
|
890 |
+ def _batch_download_complete(self, batch):
|
|
891 |
+ for digest, data in batch.send():
|
|
892 |
+ with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
|
|
893 |
+ f.write(data)
|
|
894 |
+ f.flush()
|
|
895 |
+ |
|
896 |
+ added_digest = self.add_object(path=f.name)
|
|
897 |
+ assert added_digest.hash == digest.hash
|
|
898 |
+ |
|
899 |
+ # Helper function for _fetch_directory().
|
|
900 |
+ def _fetch_directory_batch(self, remote, batch, fetch_queue, fetch_next_queue):
|
|
901 |
+ self._batch_download_complete(batch)
|
|
902 |
+ |
|
903 |
+ # All previously scheduled directories are now locally available,
|
|
904 |
+ # move them to the processing queue.
|
|
905 |
+ fetch_queue.extend(fetch_next_queue)
|
|
906 |
+ fetch_next_queue.clear()
|
|
907 |
+ return _CASBatchRead(remote)
|
|
908 |
+ |
|
909 |
+ # Helper function for _fetch_directory().
|
|
910 |
+ def _fetch_directory_node(self, remote, digest, batch, fetch_queue, fetch_next_queue, *, recursive=False):
|
|
911 |
+ in_local_cache = os.path.exists(self.objpath(digest))
|
|
912 |
+ |
|
913 |
+ if in_local_cache:
|
|
914 |
+ # Skip download, already in local cache.
|
|
915 |
+ pass
|
|
916 |
+ elif (digest.size_bytes >= remote.max_batch_total_size_bytes or
|
|
917 |
+ not remote.batch_read_supported):
|
|
918 |
+ # Too large for batch request, download in independent request.
|
|
919 |
+ self._ensure_blob(remote, digest)
|
|
920 |
+ in_local_cache = True
|
|
921 |
+ else:
|
|
922 |
+ if not batch.add(digest):
|
|
923 |
+ # Not enough space left in batch request.
|
|
924 |
+ # Complete pending batch first.
|
|
925 |
+ batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
|
|
926 |
+ batch.add(digest)
|
|
927 |
+ |
|
928 |
+ if recursive:
|
|
929 |
+ if in_local_cache:
|
|
930 |
+ # Add directory to processing queue.
|
|
931 |
+ fetch_queue.append(digest)
|
|
932 |
+ else:
|
|
933 |
+ # Directory will be available after completing pending batch.
|
|
934 |
+ # Add directory to deferred processing queue.
|
|
935 |
+ fetch_next_queue.append(digest)
|
|
936 |
+ |
|
937 |
+ return batch
|
|
938 |
+ |
|
857 | 939 |
# _fetch_directory():
|
858 | 940 |
#
|
859 | 941 |
# Fetches remote directory and adds it to content addressable store.
|
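The helpers added above replace one gRPC round trip per blob with batched downloads: `_fetch_directory_node()` either queues a digest into the current `_CASBatchRead` or falls back to `_ensure_blob()` for oversized blobs, and directories that are still waiting on a pending batch are parked until that batch completes. A simplified, self-contained sketch of this queue-plus-batch pattern; `is_dir`, `children_of`, `blob_size`, `fetch_blob` and `send_batch` are hypothetical callables standing in for the real CAS and gRPC plumbing:

```python
from collections import deque

def fetch_tree(root, is_dir, children_of, blob_size, fetch_blob, send_batch,
               max_batch_bytes=1024 * 1024):
    """Breadth-first fetch of a blob DAG, grouping small blobs into batches.

    children_of(digest) may only be called once the blob behind `digest`
    is local, so directories queued into a batch are parked in `deferred`
    until that batch has been sent.
    """
    fetch_queue = deque()      # directories that are already local
    deferred = deque()         # directories waiting on the pending batch
    batch, batch_bytes = [], 0

    def flush():
        nonlocal batch, batch_bytes
        if batch:
            send_batch(batch)          # downloads and stores every queued blob
        fetch_queue.extend(deferred)   # parked directories are now local
        deferred.clear()
        batch, batch_bytes = [], 0

    def schedule(digest):
        nonlocal batch_bytes
        size = blob_size(digest)
        if size >= max_batch_bytes:
            fetch_blob(digest)         # too large for a batch, fetch alone
            return True                # immediately available
        if batch_bytes + size > max_batch_bytes:
            flush()                    # no room left, complete the batch first
        batch.append(digest)
        batch_bytes += size
        return False                   # available once the batch is sent

    fetch_blob(root)                   # the root must be local before walking it
    fetch_queue.append(root)

    while fetch_queue or deferred:
        if not fetch_queue:
            flush()
            continue
        directory = fetch_queue.popleft()
        for child in children_of(directory):
            local = schedule(child)
            if is_dir(child):
                (fetch_queue if local else deferred).append(child)

    flush()                            # fetch whatever is still pending
```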
... | ... | @@ -867,39 +949,32 @@ class CASCache(ArtifactCache): |
867 | 949 |
# dir_digest (Digest): Digest object for the directory to fetch.
|
868 | 950 |
#
|
869 | 951 |
def _fetch_directory(self, remote, dir_digest):
|
870 |
- objpath = self.objpath(dir_digest)
|
|
871 |
- if os.path.exists(objpath):
|
|
872 |
- # already in local cache
|
|
873 |
- return
|
|
874 |
- |
|
875 |
- with tempfile.NamedTemporaryFile(dir=self.tmpdir) as out:
|
|
876 |
- self._fetch_blob(remote, dir_digest, out)
|
|
877 |
- |
|
878 |
- directory = remote_execution_pb2.Directory()
|
|
952 |
+ fetch_queue = [dir_digest]
|
|
953 |
+ fetch_next_queue = []
|
|
954 |
+ batch = _CASBatchRead(remote)
|
|
879 | 955 |
|
880 |
- with open(out.name, 'rb') as f:
|
|
881 |
- directory.ParseFromString(f.read())
|
|
956 |
+ while len(fetch_queue) + len(fetch_next_queue) > 0:
|
|
957 |
+ if len(fetch_queue) == 0:
|
|
958 |
+ batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
|
|
882 | 959 |
|
883 |
- for filenode in directory.files:
|
|
884 |
- fileobjpath = self.objpath(filenode.digest)
|
|
885 |
- if os.path.exists(fileobjpath):
|
|
886 |
- # already in local cache
|
|
887 |
- continue
|
|
960 |
+ dir_digest = fetch_queue.pop(0)
|
|
888 | 961 |
|
889 |
- with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
|
|
890 |
- self._fetch_blob(remote, filenode.digest, f)
|
|
962 |
+ objpath = self._ensure_blob(remote, dir_digest)
|
|
891 | 963 |
|
892 |
- digest = self.add_object(path=f.name)
|
|
893 |
- assert digest.hash == filenode.digest.hash
|
|
964 |
+ directory = remote_execution_pb2.Directory()
|
|
965 |
+ with open(objpath, 'rb') as f:
|
|
966 |
+ directory.ParseFromString(f.read())
|
|
894 | 967 |
|
895 | 968 |
for dirnode in directory.directories:
|
896 |
- self._fetch_directory(remote, dirnode.digest)
|
|
969 |
+ batch = self._fetch_directory_node(remote, dirnode.digest, batch,
|
|
970 |
+ fetch_queue, fetch_next_queue, recursive=True)
|
|
971 |
+ |
|
972 |
+ for filenode in directory.files:
|
|
973 |
+ batch = self._fetch_directory_node(remote, filenode.digest, batch,
|
|
974 |
+ fetch_queue, fetch_next_queue)
|
|
897 | 975 |
|
898 |
- # Place directory blob only in final location when we've
|
|
899 |
- # downloaded all referenced blobs to avoid dangling
|
|
900 |
- # references in the repository.
|
|
901 |
- digest = self.add_object(path=out.name)
|
|
902 |
- assert digest.hash == dir_digest.hash
|
|
976 |
+ # Fetch final batch
|
|
977 |
+ self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
|
|
903 | 978 |
|
904 | 979 |
def _fetch_tree(self, remote, digest):
|
905 | 980 |
# download but do not store the Tree object
|
... | ... | @@ -914,16 +989,7 @@ class CASCache(ArtifactCache): |
914 | 989 |
tree.children.extend([tree.root])
|
915 | 990 |
for directory in tree.children:
|
916 | 991 |
for filenode in directory.files:
|
917 |
- fileobjpath = self.objpath(filenode.digest)
|
|
918 |
- if os.path.exists(fileobjpath):
|
|
919 |
- # already in local cache
|
|
920 |
- continue
|
|
921 |
- |
|
922 |
- with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
|
|
923 |
- self._fetch_blob(remote, filenode.digest, f)
|
|
924 |
- |
|
925 |
- added_digest = self.add_object(path=f.name)
|
|
926 |
- assert added_digest.hash == filenode.digest.hash
|
|
992 |
+ self._ensure_blob(remote, filenode.digest)
|
|
927 | 993 |
|
928 | 994 |
# place directory blob only in final location when we've downloaded
|
929 | 995 |
# all referenced blobs to avoid dangling references in the repository
|
... | ... | @@ -942,12 +1008,12 @@ class CASCache(ArtifactCache): |
942 | 1008 |
finished = False
|
943 | 1009 |
remaining = digest.size_bytes
|
944 | 1010 |
while not finished:
|
945 |
- chunk_size = min(remaining, 64 * 1024)
|
|
1011 |
+ chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
|
|
946 | 1012 |
remaining -= chunk_size
|
947 | 1013 |
|
948 | 1014 |
request = bytestream_pb2.WriteRequest()
|
949 | 1015 |
request.write_offset = offset
|
950 |
- # max. 64 kB chunks
|
|
1016 |
+ # max. _MAX_PAYLOAD_BYTES chunks
|
|
951 | 1017 |
request.data = instream.read(chunk_size)
|
952 | 1018 |
request.resource_name = resname
|
953 | 1019 |
request.finish_write = remaining <= 0
|
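The hunk above lowers the upload chunk size from 64 KiB to `_MAX_PAYLOAD_BYTES` (1 MiB) so that each `WriteRequest`, including its resource name and other metadata, stays well under gRPC's default 4 MiB message limit. A minimal sketch of the chunking loop; `iter_write_chunks` is an illustrative helper, not BuildStream code:

```python
def iter_write_chunks(stream, total_size, max_payload_bytes=1024 * 1024):
    """Yield (offset, data, finish_write) tuples for a ByteStream-style upload.

    Each tuple can populate one bytestream_pb2.WriteRequest; the payload per
    message is capped so the whole request fits the server's message limit.
    """
    offset = 0
    remaining = total_size
    while remaining > 0:
        chunk_size = min(remaining, max_payload_bytes)
        data = stream.read(chunk_size)
        remaining -= chunk_size
        yield offset, data, remaining <= 0
        offset += len(data)
```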
... | ... | @@ -1035,11 +1101,78 @@ class _CASRemote(): |
1035 | 1101 |
|
1036 | 1102 |
self.bytestream = bytestream_pb2_grpc.ByteStreamStub(self.channel)
|
1037 | 1103 |
self.cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(self.channel)
|
1104 |
+ self.capabilities = remote_execution_pb2_grpc.CapabilitiesStub(self.channel)
|
|
1038 | 1105 |
self.ref_storage = buildstream_pb2_grpc.ReferenceStorageStub(self.channel)
|
1039 | 1106 |
|
1107 |
+ self.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
|
|
1108 |
+ try:
|
|
1109 |
+ request = remote_execution_pb2.GetCapabilitiesRequest()
|
|
1110 |
+ response = self.capabilities.GetCapabilities(request)
|
|
1111 |
+ server_max_batch_total_size_bytes = response.cache_capabilities.max_batch_total_size_bytes
|
|
1112 |
+ if 0 < server_max_batch_total_size_bytes < self.max_batch_total_size_bytes:
|
|
1113 |
+ self.max_batch_total_size_bytes = server_max_batch_total_size_bytes
|
|
1114 |
+ except grpc.RpcError as e:
|
|
1115 |
+ # Simply use the defaults for servers that don't implement GetCapabilities()
|
|
1116 |
+ if e.code() != grpc.StatusCode.UNIMPLEMENTED:
|
|
1117 |
+ raise
|
|
1118 |
+ |
|
1119 |
+ # Check whether the server supports BatchReadBlobs()
|
|
1120 |
+ self.batch_read_supported = False
|
|
1121 |
+ try:
|
|
1122 |
+ request = remote_execution_pb2.BatchReadBlobsRequest()
|
|
1123 |
+ response = self.cas.BatchReadBlobs(request)
|
|
1124 |
+ self.batch_read_supported = True
|
|
1125 |
+ except grpc.RpcError as e:
|
|
1126 |
+ if e.code() != grpc.StatusCode.UNIMPLEMENTED:
|
|
1127 |
+ raise
|
|
1128 |
+ |
|
1040 | 1129 |
self._initialized = True
|
1041 | 1130 |
|
1042 | 1131 |
|
1132 |
+# Represents a batch of blobs queued for fetching.
|
|
1133 |
+#
|
|
1134 |
+class _CASBatchRead():
|
|
1135 |
+ def __init__(self, remote):
|
|
1136 |
+ self._remote = remote
|
|
1137 |
+ self._max_total_size_bytes = remote.max_batch_total_size_bytes
|
|
1138 |
+ self._request = remote_execution_pb2.BatchReadBlobsRequest()
|
|
1139 |
+ self._size = 0
|
|
1140 |
+ self._sent = False
|
|
1141 |
+ |
|
1142 |
+ def add(self, digest):
|
|
1143 |
+ assert not self._sent
|
|
1144 |
+ |
|
1145 |
+ new_batch_size = self._size + digest.size_bytes
|
|
1146 |
+ if new_batch_size > self._max_total_size_bytes:
|
|
1147 |
+ # Not enough space left in current batch
|
|
1148 |
+ return False
|
|
1149 |
+ |
|
1150 |
+ request_digest = self._request.digests.add()
|
|
1151 |
+ request_digest.hash = digest.hash
|
|
1152 |
+ request_digest.size_bytes = digest.size_bytes
|
|
1153 |
+ self._size = new_batch_size
|
|
1154 |
+ return True
|
|
1155 |
+ |
|
1156 |
+ def send(self):
|
|
1157 |
+ assert not self._sent
|
|
1158 |
+ self._sent = True
|
|
1159 |
+ |
|
1160 |
+ if len(self._request.digests) == 0:
|
|
1161 |
+ return
|
|
1162 |
+ |
|
1163 |
+ batch_response = self._remote.cas.BatchReadBlobs(self._request)
|
|
1164 |
+ |
|
1165 |
+ for response in batch_response.responses:
|
|
1166 |
+ if response.status.code != grpc.StatusCode.OK.value[0]:
|
|
1167 |
+ raise ArtifactError("Failed to download blob {}: {}".format(
|
|
1168 |
+ response.digest.hash, response.status.code))
|
|
1169 |
+ if response.digest.size_bytes != len(response.data):
|
|
1170 |
+ raise ArtifactError("Failed to download blob {}: expected {} bytes, received {} bytes".format(
|
|
1171 |
+ response.digest.hash, response.digest.size_bytes, len(response.data)))
|
|
1172 |
+ |
|
1173 |
+ yield (response.digest, response.data)
|
|
1174 |
+ |
|
1175 |
+ |
|
1043 | 1176 |
def _grouper(iterable, n):
|
1044 | 1177 |
while True:
|
1045 | 1178 |
try:
|
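`_CASBatchRead` accumulates digests until the negotiated `max_batch_total_size_bytes` is reached and then issues a single `BatchReadBlobs()` call; `send()` is a generator, so callers stream `(digest, data)` pairs back out. A minimal usage sketch under the assumption that every digest is smaller than the batch limit (oversized blobs go through `_fetch_blob()` instead); `download_blobs` and `store` are hypothetical names:

```python
def download_blobs(remote, digests, store):
    """Download blobs via batched reads, respecting the remote's size limit.

    remote -- a _CASRemote-like object with .max_batch_total_size_bytes and .cas
    store  -- callable persisting (digest, data) into the local CAS
    """
    batch = _CASBatchRead(remote)
    for digest in digests:
        if not batch.add(digest):
            # No room left in the current request: send it, then start a new one.
            for blob_digest, data in batch.send():
                store(blob_digest, data)
            batch = _CASBatchRead(remote)
            batch.add(digest)

    # Flush whatever is still queued.
    for blob_digest, data in batch.send():
        store(blob_digest, data)
```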
... | ... | @@ -35,11 +35,10 @@ from .._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc |
35 | 35 |
from .._exceptions import ArtifactError
|
36 | 36 |
from .._context import Context
|
37 | 37 |
|
38 |
-from .cascache import CASCache
|
|
39 | 38 |
|
40 |
- |
|
41 |
-# The default limit for gRPC messages is 4 MiB
|
|
42 |
-_MAX_BATCH_TOTAL_SIZE_BYTES = 4 * 1024 * 1024
|
|
39 |
+# The default limit for gRPC messages is 4 MiB.
|
|
40 |
+# Limit payload to 1 MiB to leave sufficient headroom for metadata.
|
|
41 |
+_MAX_PAYLOAD_BYTES = 1024 * 1024
|
|
43 | 42 |
|
44 | 43 |
|
45 | 44 |
# Trying to push an artifact that is too large
|
... | ... | @@ -59,7 +58,7 @@ def create_server(repo, *, enable_push): |
59 | 58 |
context = Context()
|
60 | 59 |
context.artifactdir = os.path.abspath(repo)
|
61 | 60 |
|
62 |
- artifactcache = CASCache(context)
|
|
61 |
+ artifactcache = context.artifactcache
|
|
63 | 62 |
|
64 | 63 |
# Use max_workers default from Python 3.5+
|
65 | 64 |
max_workers = (os.cpu_count() or 1) * 5
|
... | ... | @@ -158,7 +157,7 @@ class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer): |
158 | 157 |
|
159 | 158 |
remaining = client_digest.size_bytes - request.read_offset
|
160 | 159 |
while remaining > 0:
|
161 |
- chunk_size = min(remaining, 64 * 1024)
|
|
160 |
+ chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
|
|
162 | 161 |
remaining -= chunk_size
|
163 | 162 |
|
164 | 163 |
response = bytestream_pb2.ReadResponse()
|
... | ... | @@ -242,7 +241,7 @@ class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddres |
242 | 241 |
|
243 | 242 |
for digest in request.digests:
|
244 | 243 |
batch_size += digest.size_bytes
|
245 |
- if batch_size > _MAX_BATCH_TOTAL_SIZE_BYTES:
|
|
244 |
+ if batch_size > _MAX_PAYLOAD_BYTES:
|
|
246 | 245 |
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
|
247 | 246 |
return response
|
248 | 247 |
|
... | ... | @@ -269,7 +268,7 @@ class _CapabilitiesServicer(remote_execution_pb2_grpc.CapabilitiesServicer): |
269 | 268 |
cache_capabilities = response.cache_capabilities
|
270 | 269 |
cache_capabilities.digest_function.append(remote_execution_pb2.SHA256)
|
271 | 270 |
cache_capabilities.action_cache_update_capabilities.update_enabled = False
|
272 |
- cache_capabilities.max_batch_total_size_bytes = _MAX_BATCH_TOTAL_SIZE_BYTES
|
|
271 |
+ cache_capabilities.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
|
|
273 | 272 |
cache_capabilities.symlink_absolute_path_strategy = remote_execution_pb2.CacheCapabilities.ALLOWED
|
274 | 273 |
|
275 | 274 |
response.deprecated_api_version.major = 2
|
... | ... | @@ -30,6 +30,7 @@ from ._exceptions import LoadError, LoadErrorReason, BstError |
30 | 30 |
from ._message import Message, MessageType
|
31 | 31 |
from ._profile import Topics, profile_start, profile_end
|
32 | 32 |
from ._artifactcache import ArtifactCache
|
33 |
+from ._artifactcache.cascache import CASCache
|
|
33 | 34 |
from ._workspaces import Workspaces
|
34 | 35 |
from .plugin import _plugin_lookup
|
35 | 36 |
|
... | ... | @@ -113,6 +114,7 @@ class Context(): |
113 | 114 |
self._cache_key = None
|
114 | 115 |
self._message_handler = None
|
115 | 116 |
self._message_depth = deque()
|
117 |
+ self._artifactcache = None
|
|
116 | 118 |
self._projects = []
|
117 | 119 |
self._project_overrides = {}
|
118 | 120 |
self._workspaces = None
|
... | ... | @@ -227,6 +229,13 @@ class Context(): |
227 | 229 |
"{}: on-error should be one of: {}".format(
|
228 | 230 |
provenance, ", ".join(valid_actions)))
|
229 | 231 |
|
232 |
+ @property
|
|
233 |
+ def artifactcache(self):
|
|
234 |
+ if not self._artifactcache:
|
|
235 |
+ self._artifactcache = CASCache(self)
|
|
236 |
+ |
|
237 |
+ return self._artifactcache
|
|
238 |
+ |
|
230 | 239 |
# add_project():
|
231 | 240 |
#
|
232 | 241 |
# Add a project to the context.
|
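With this change the `Context` owns the artifact cache: the `artifactcache` property lazily creates a single `CASCache` on first access, so the scheduler, loader, sandboxes and CAS server can all share one instance without going through `Platform`. A short usage sketch, mirroring how `create_server()` in the diff constructs a bare `Context`; the artifact directory path is a placeholder:

```python
context = Context()
context.artifactdir = "/path/to/artifacts"     # placeholder location

cache = context.artifactcache                  # first access instantiates CASCache(context)
assert cache is context.artifactcache          # later accesses return the same object

cache.preflight()                              # fail early if the CAS layout is unusable
```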
... | ... | @@ -198,10 +198,15 @@ class App(): |
198 | 198 |
if option_value is not None:
|
199 | 199 |
setattr(self.context, context_attr, option_value)
|
200 | 200 |
try:
|
201 |
- Platform.create_instance(self.context)
|
|
201 |
+ Platform.get_platform()
|
|
202 | 202 |
except BstError as e:
|
203 | 203 |
self._error_exit(e, "Error instantiating platform")
|
204 | 204 |
|
205 |
+ try:
|
|
206 |
+ self.context.artifactcache.preflight()
|
|
207 |
+ except BstError as e:
|
|
208 |
+ self._error_exit(e, "Error instantiating artifact cache")
|
|
209 |
+ |
|
205 | 210 |
# Create the logger right before setting the message handler
|
206 | 211 |
self.logger = LogLine(self.context,
|
207 | 212 |
self._content_profile,
|
... | ... | @@ -28,7 +28,6 @@ from .. import Consistency |
28 | 28 |
from .. import _yaml
|
29 | 29 |
from ..element import Element
|
30 | 30 |
from .._profile import Topics, profile_start, profile_end
|
31 |
-from .._platform import Platform
|
|
32 | 31 |
from .._includes import Includes
|
33 | 32 |
|
34 | 33 |
from .types import Symbol, Dependency
|
... | ... | @@ -518,8 +517,7 @@ class Loader(): |
518 | 517 |
raise LoadError(LoadErrorReason.INVALID_DATA,
|
519 | 518 |
"{}: Expected junction but element kind is {}".format(filename, meta_element.kind))
|
520 | 519 |
|
521 |
- platform = Platform.get_platform()
|
|
522 |
- element = Element._new_from_meta(meta_element, platform.artifactcache)
|
|
520 |
+ element = Element._new_from_meta(meta_element, self._context.artifactcache)
|
|
523 | 521 |
element._preflight()
|
524 | 522 |
|
525 | 523 |
sources = list(element.sources())
|
... | ... | @@ -17,11 +17,11 @@ |
17 | 17 |
# Authors:
|
18 | 18 |
# Tristan Maat <tristan maat codethink co uk>
|
19 | 19 |
|
20 |
+import os
|
|
20 | 21 |
import subprocess
|
21 | 22 |
|
22 | 23 |
from .. import _site
|
23 | 24 |
from .. import utils
|
24 |
-from .._artifactcache.cascache import CASCache
|
|
25 | 25 |
from .._message import Message, MessageType
|
26 | 26 |
from ..sandbox import SandboxBwrap
|
27 | 27 |
|
... | ... | @@ -30,17 +30,15 @@ from . import Platform |
30 | 30 |
|
31 | 31 |
class Linux(Platform):
|
32 | 32 |
|
33 |
- def __init__(self, context):
|
|
33 |
+ def __init__(self):
|
|
34 | 34 |
|
35 |
- super().__init__(context)
|
|
35 |
+ super().__init__()
|
|
36 | 36 |
|
37 |
- self._die_with_parent_available = _site.check_bwrap_version(0, 1, 8)
|
|
38 |
- self._user_ns_available = self._check_user_ns_available(context)
|
|
39 |
- self._artifact_cache = CASCache(context, enable_push=self._user_ns_available)
|
|
37 |
+ self._uid = os.geteuid()
|
|
38 |
+ self._gid = os.getegid()
|
|
40 | 39 |
|
41 |
- @property
|
|
42 |
- def artifactcache(self):
|
|
43 |
- return self._artifact_cache
|
|
40 |
+ self._die_with_parent_available = _site.check_bwrap_version(0, 1, 8)
|
|
41 |
+ self._user_ns_available = self._check_user_ns_available()
|
|
44 | 42 |
|
45 | 43 |
def create_sandbox(self, *args, **kwargs):
|
46 | 44 |
# Inform the bubblewrap sandbox as to whether it can use user namespaces or not
|
... | ... | @@ -48,10 +46,19 @@ class Linux(Platform): |
48 | 46 |
kwargs['die_with_parent_available'] = self._die_with_parent_available
|
49 | 47 |
return SandboxBwrap(*args, **kwargs)
|
50 | 48 |
|
49 |
+ def check_sandbox_config(self, config):
|
|
50 |
+ if self._user_ns_available:
|
|
51 |
+ # User namespace support allows arbitrary build UID/GID settings.
|
|
52 |
+ return True
|
|
53 |
+ else:
|
|
54 |
+ # Without user namespace support, the UID/GID in the sandbox
|
|
55 |
+ # will match the host UID/GID.
|
|
56 |
+ return config.build_uid == self._uid and config.build_gid == self._gid
|
|
57 |
+ |
|
51 | 58 |
################################################
|
52 | 59 |
# Private Methods #
|
53 | 60 |
################################################
|
54 |
- def _check_user_ns_available(self, context):
|
|
61 |
+ def _check_user_ns_available(self):
|
|
55 | 62 |
|
56 | 63 |
# Here, lets check if bwrap is able to create user namespaces,
|
57 | 64 |
# issue a warning if it's not available, and save the state
|
... | ... | @@ -75,9 +82,4 @@ class Linux(Platform): |
75 | 82 |
return True
|
76 | 83 |
|
77 | 84 |
else:
|
78 |
- context.message(
|
|
79 |
- Message(None, MessageType.WARN,
|
|
80 |
- "Unable to create user namespaces with bubblewrap, resorting to fallback",
|
|
81 |
- detail="Some builds may not function due to lack of uid / gid 0, " +
|
|
82 |
- "artifacts created will not be trusted for push purposes."))
|
|
83 | 85 |
return False
|
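The Linux platform still probes whether bubblewrap can create user namespaces and now only records the result; the warning is deferred to the element that actually needs an unsupported UID/GID. For illustration, one plausible probe, not necessarily the exact command BuildStream runs, is to enter a new user namespace mapped to uid/gid 0 and check that the child really is root:

```python
import subprocess

def check_user_ns_available():
    """Probe whether bubblewrap can create user namespaces (illustrative only)."""
    try:
        whoami = subprocess.run(
            ["bwrap", "--ro-bind", "/", "/", "--unshare-user",
             "--uid", "0", "--gid", "0", "whoami"],
            stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, check=False)
        return whoami.returncode == 0 and whoami.stdout.strip() == b"root"
    except FileNotFoundError:
        # bwrap is not installed at all
        return False
```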
... | ... | @@ -29,17 +29,13 @@ class Platform(): |
29 | 29 |
# Platform()
|
30 | 30 |
#
|
31 | 31 |
# A class to manage platform-specific details. Currently holds the
|
32 |
- # sandbox factory, the artifact cache and staging operations, as
|
|
33 |
- # well as platform helpers.
|
|
32 |
+ # sandbox factory as well as platform helpers.
|
|
34 | 33 |
#
|
35 |
- # Args:
|
|
36 |
- # context (context): The project context
|
|
37 |
- #
|
|
38 |
- def __init__(self, context):
|
|
39 |
- self.context = context
|
|
34 |
+ def __init__(self):
|
|
35 |
+ pass
|
|
40 | 36 |
|
41 | 37 |
@classmethod
|
42 |
- def create_instance(cls, *args, **kwargs):
|
|
38 |
+ def _create_instance(cls):
|
|
43 | 39 |
if sys.platform.startswith('linux'):
|
44 | 40 |
backend = 'linux'
|
45 | 41 |
else:
|
... | ... | @@ -58,22 +54,14 @@ class Platform(): |
58 | 54 |
else:
|
59 | 55 |
raise PlatformError("No such platform: '{}'".format(backend))
|
60 | 56 |
|
61 |
- cls._instance = PlatformImpl(*args, **kwargs)
|
|
57 |
+ cls._instance = PlatformImpl()
|
|
62 | 58 |
|
63 | 59 |
@classmethod
|
64 | 60 |
def get_platform(cls):
|
65 | 61 |
if not cls._instance:
|
66 |
- raise PlatformError("Platform needs to be initialized first")
|
|
62 |
+ cls._create_instance()
|
|
67 | 63 |
return cls._instance
|
68 | 64 |
|
69 |
- ##################################################################
|
|
70 |
- # Platform properties #
|
|
71 |
- ##################################################################
|
|
72 |
- @property
|
|
73 |
- def artifactcache(self):
|
|
74 |
- raise ImplError("Platform {platform} does not implement an artifactcache"
|
|
75 |
- .format(platform=type(self).__name__))
|
|
76 |
- |
|
77 | 65 |
##################################################################
|
78 | 66 |
# Sandbox functions #
|
79 | 67 |
##################################################################
|
... | ... | @@ -92,3 +80,7 @@ class Platform(): |
92 | 80 |
def create_sandbox(self, *args, **kwargs):
|
93 | 81 |
raise ImplError("Platform {platform} does not implement create_sandbox()"
|
94 | 82 |
.format(platform=type(self).__name__))
|
83 |
+ |
|
84 |
+ def check_sandbox_config(self, config):
|
|
85 |
+ raise ImplError("Platform {platform} does not implement check_sandbox_config()"
|
|
86 |
+ .format(platform=type(self).__name__))
|
... | ... | @@ -19,7 +19,6 @@ |
19 | 19 |
|
20 | 20 |
import os
|
21 | 21 |
|
22 |
-from .._artifactcache.cascache import CASCache
|
|
23 | 22 |
from .._exceptions import PlatformError
|
24 | 23 |
from ..sandbox import SandboxChroot
|
25 | 24 |
|
... | ... | @@ -28,18 +27,21 @@ from . import Platform |
28 | 27 |
|
29 | 28 |
class Unix(Platform):
|
30 | 29 |
|
31 |
- def __init__(self, context):
|
|
30 |
+ def __init__(self):
|
|
32 | 31 |
|
33 |
- super().__init__(context)
|
|
34 |
- self._artifact_cache = CASCache(context)
|
|
32 |
+ super().__init__()
|
|
33 |
+ |
|
34 |
+ self._uid = os.geteuid()
|
|
35 |
+ self._gid = os.getegid()
|
|
35 | 36 |
|
36 | 37 |
# Not necessarily 100% reliable, but we want to fail early.
|
37 |
- if os.geteuid() != 0:
|
|
38 |
+ if self._uid != 0:
|
|
38 | 39 |
raise PlatformError("Root privileges are required to run without bubblewrap.")
|
39 | 40 |
|
40 |
- @property
|
|
41 |
- def artifactcache(self):
|
|
42 |
- return self._artifact_cache
|
|
43 |
- |
|
44 | 41 |
def create_sandbox(self, *args, **kwargs):
|
45 | 42 |
return SandboxChroot(*args, **kwargs)
|
43 |
+ |
|
44 |
+ def check_sandbox_config(self, config):
|
|
45 |
+ # With the chroot sandbox, the UID/GID in the sandbox
|
|
46 |
+ # will match the host UID/GID (typically 0/0).
|
|
47 |
+ return config.build_uid == self._uid and config.build_gid == self._gid
|
1 |
+// Copyright 2017 Google Inc.
|
|
2 |
+//
|
|
3 |
+// Licensed under the Apache License, Version 2.0 (the "License");
|
|
4 |
+// you may not use this file except in compliance with the License.
|
|
5 |
+// You may obtain a copy of the License at
|
|
6 |
+//
|
|
7 |
+// http://www.apache.org/licenses/LICENSE-2.0
|
|
8 |
+//
|
|
9 |
+// Unless required by applicable law or agreed to in writing, software
|
|
10 |
+// distributed under the License is distributed on an "AS IS" BASIS,
|
|
11 |
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12 |
+// See the License for the specific language governing permissions and
|
|
13 |
+// limitations under the License.
|
|
14 |
+ |
|
15 |
+syntax = "proto3";
|
|
16 |
+ |
|
17 |
+package google.rpc;
|
|
18 |
+ |
|
19 |
+option go_package = "google.golang.org/genproto/googleapis/rpc/code;code";
|
|
20 |
+option java_multiple_files = true;
|
|
21 |
+option java_outer_classname = "CodeProto";
|
|
22 |
+option java_package = "com.google.rpc";
|
|
23 |
+option objc_class_prefix = "RPC";
|
|
24 |
+ |
|
25 |
+ |
|
26 |
+// The canonical error codes for Google APIs.
|
|
27 |
+//
|
|
28 |
+//
|
|
29 |
+// Sometimes multiple error codes may apply. Services should return
|
|
30 |
+// the most specific error code that applies. For example, prefer
|
|
31 |
+// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply.
|
|
32 |
+// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`.
|
|
33 |
+enum Code {
|
|
34 |
+ // Not an error; returned on success
|
|
35 |
+ //
|
|
36 |
+ // HTTP Mapping: 200 OK
|
|
37 |
+ OK = 0;
|
|
38 |
+ |
|
39 |
+ // The operation was cancelled, typically by the caller.
|
|
40 |
+ //
|
|
41 |
+ // HTTP Mapping: 499 Client Closed Request
|
|
42 |
+ CANCELLED = 1;
|
|
43 |
+ |
|
44 |
+ // Unknown error. For example, this error may be returned when
|
|
45 |
+ // a `Status` value received from another address space belongs to
|
|
46 |
+ // an error space that is not known in this address space. Also
|
|
47 |
+ // errors raised by APIs that do not return enough error information
|
|
48 |
+ // may be converted to this error.
|
|
49 |
+ //
|
|
50 |
+ // HTTP Mapping: 500 Internal Server Error
|
|
51 |
+ UNKNOWN = 2;
|
|
52 |
+ |
|
53 |
+ // The client specified an invalid argument. Note that this differs
|
|
54 |
+ // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments
|
|
55 |
+ // that are problematic regardless of the state of the system
|
|
56 |
+ // (e.g., a malformed file name).
|
|
57 |
+ //
|
|
58 |
+ // HTTP Mapping: 400 Bad Request
|
|
59 |
+ INVALID_ARGUMENT = 3;
|
|
60 |
+ |
|
61 |
+ // The deadline expired before the operation could complete. For operations
|
|
62 |
+ // that change the state of the system, this error may be returned
|
|
63 |
+ // even if the operation has completed successfully. For example, a
|
|
64 |
+ // successful response from a server could have been delayed long
|
|
65 |
+ // enough for the deadline to expire.
|
|
66 |
+ //
|
|
67 |
+ // HTTP Mapping: 504 Gateway Timeout
|
|
68 |
+ DEADLINE_EXCEEDED = 4;
|
|
69 |
+ |
|
70 |
+ // Some requested entity (e.g., file or directory) was not found.
|
|
71 |
+ //
|
|
72 |
+ // Note to server developers: if a request is denied for an entire class
|
|
73 |
+ // of users, such as gradual feature rollout or undocumented whitelist,
|
|
74 |
+ // `NOT_FOUND` may be used. If a request is denied for some users within
|
|
75 |
+ // a class of users, such as user-based access control, `PERMISSION_DENIED`
|
|
76 |
+ // must be used.
|
|
77 |
+ //
|
|
78 |
+ // HTTP Mapping: 404 Not Found
|
|
79 |
+ NOT_FOUND = 5;
|
|
80 |
+ |
|
81 |
+ // The entity that a client attempted to create (e.g., file or directory)
|
|
82 |
+ // already exists.
|
|
83 |
+ //
|
|
84 |
+ // HTTP Mapping: 409 Conflict
|
|
85 |
+ ALREADY_EXISTS = 6;
|
|
86 |
+ |
|
87 |
+ // The caller does not have permission to execute the specified
|
|
88 |
+ // operation. `PERMISSION_DENIED` must not be used for rejections
|
|
89 |
+ // caused by exhausting some resource (use `RESOURCE_EXHAUSTED`
|
|
90 |
+ // instead for those errors). `PERMISSION_DENIED` must not be
|
|
91 |
+ // used if the caller can not be identified (use `UNAUTHENTICATED`
|
|
92 |
+ // instead for those errors). This error code does not imply the
|
|
93 |
+ // request is valid or the requested entity exists or satisfies
|
|
94 |
+ // other pre-conditions.
|
|
95 |
+ //
|
|
96 |
+ // HTTP Mapping: 403 Forbidden
|
|
97 |
+ PERMISSION_DENIED = 7;
|
|
98 |
+ |
|
99 |
+ // The request does not have valid authentication credentials for the
|
|
100 |
+ // operation.
|
|
101 |
+ //
|
|
102 |
+ // HTTP Mapping: 401 Unauthorized
|
|
103 |
+ UNAUTHENTICATED = 16;
|
|
104 |
+ |
|
105 |
+ // Some resource has been exhausted, perhaps a per-user quota, or
|
|
106 |
+ // perhaps the entire file system is out of space.
|
|
107 |
+ //
|
|
108 |
+ // HTTP Mapping: 429 Too Many Requests
|
|
109 |
+ RESOURCE_EXHAUSTED = 8;
|
|
110 |
+ |
|
111 |
+ // The operation was rejected because the system is not in a state
|
|
112 |
+ // required for the operation's execution. For example, the directory
|
|
113 |
+ // to be deleted is non-empty, an rmdir operation is applied to
|
|
114 |
+ // a non-directory, etc.
|
|
115 |
+ //
|
|
116 |
+ // Service implementors can use the following guidelines to decide
|
|
117 |
+ // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:
|
|
118 |
+ // (a) Use `UNAVAILABLE` if the client can retry just the failing call.
|
|
119 |
+ // (b) Use `ABORTED` if the client should retry at a higher level
|
|
120 |
+ // (e.g., when a client-specified test-and-set fails, indicating the
|
|
121 |
+ // client should restart a read-modify-write sequence).
|
|
122 |
+ // (c) Use `FAILED_PRECONDITION` if the client should not retry until
|
|
123 |
+ // the system state has been explicitly fixed. E.g., if an "rmdir"
|
|
124 |
+ // fails because the directory is non-empty, `FAILED_PRECONDITION`
|
|
125 |
+ // should be returned since the client should not retry unless
|
|
126 |
+ // the files are deleted from the directory.
|
|
127 |
+ //
|
|
128 |
+ // HTTP Mapping: 400 Bad Request
|
|
129 |
+ FAILED_PRECONDITION = 9;
|
|
130 |
+ |
|
131 |
+ // The operation was aborted, typically due to a concurrency issue such as
|
|
132 |
+ // a sequencer check failure or transaction abort.
|
|
133 |
+ //
|
|
134 |
+ // See the guidelines above for deciding between `FAILED_PRECONDITION`,
|
|
135 |
+ // `ABORTED`, and `UNAVAILABLE`.
|
|
136 |
+ //
|
|
137 |
+ // HTTP Mapping: 409 Conflict
|
|
138 |
+ ABORTED = 10;
|
|
139 |
+ |
|
140 |
+ // The operation was attempted past the valid range. E.g., seeking or
|
|
141 |
+ // reading past end-of-file.
|
|
142 |
+ //
|
|
143 |
+ // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may
|
|
144 |
+ // be fixed if the system state changes. For example, a 32-bit file
|
|
145 |
+ // system will generate `INVALID_ARGUMENT` if asked to read at an
|
|
146 |
+ // offset that is not in the range [0,2^32-1], but it will generate
|
|
147 |
+ // `OUT_OF_RANGE` if asked to read from an offset past the current
|
|
148 |
+ // file size.
|
|
149 |
+ //
|
|
150 |
+ // There is a fair bit of overlap between `FAILED_PRECONDITION` and
|
|
151 |
+ // `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific
|
|
152 |
+ // error) when it applies so that callers who are iterating through
|
|
153 |
+ // a space can easily look for an `OUT_OF_RANGE` error to detect when
|
|
154 |
+ // they are done.
|
|
155 |
+ //
|
|
156 |
+ // HTTP Mapping: 400 Bad Request
|
|
157 |
+ OUT_OF_RANGE = 11;
|
|
158 |
+ |
|
159 |
+ // The operation is not implemented or is not supported/enabled in this
|
|
160 |
+ // service.
|
|
161 |
+ //
|
|
162 |
+ // HTTP Mapping: 501 Not Implemented
|
|
163 |
+ UNIMPLEMENTED = 12;
|
|
164 |
+ |
|
165 |
+ // Internal errors. This means that some invariants expected by the
|
|
166 |
+ // underlying system have been broken. This error code is reserved
|
|
167 |
+ // for serious errors.
|
|
168 |
+ //
|
|
169 |
+ // HTTP Mapping: 500 Internal Server Error
|
|
170 |
+ INTERNAL = 13;
|
|
171 |
+ |
|
172 |
+ // The service is currently unavailable. This is most likely a
|
|
173 |
+ // transient condition, which can be corrected by retrying with
|
|
174 |
+ // a backoff.
|
|
175 |
+ //
|
|
176 |
+ // See the guidelines above for deciding between `FAILED_PRECONDITION`,
|
|
177 |
+ // `ABORTED`, and `UNAVAILABLE`.
|
|
178 |
+ //
|
|
179 |
+ // HTTP Mapping: 503 Service Unavailable
|
|
180 |
+ UNAVAILABLE = 14;
|
|
181 |
+ |
|
182 |
+ // Unrecoverable data loss or corruption.
|
|
183 |
+ //
|
|
184 |
+ // HTTP Mapping: 500 Internal Server Error
|
|
185 |
+ DATA_LOSS = 15;
|
|
186 |
+}
|
|
\ No newline at end of file |
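The vendored `google/rpc/code.proto`, together with the generated `code_pb2` module below, provides the canonical status codes that batched CAS responses and remote execution results report per item, so callers can compare `response.status.code` against symbolic names rather than raw integers. A small sketch; `check_batch_response` is an illustrative helper and `batch_response` is an assumed `BatchReadBlobsResponse`:

```python
from buildstream._protos.google.rpc import code_pb2

def check_batch_response(batch_response):
    """Raise if any blob in a BatchReadBlobs response failed."""
    for response in batch_response.responses:
        if response.status.code != code_pb2.OK:
            raise RuntimeError("blob {} failed with code {}".format(
                response.digest.hash, code_pb2.Code.Name(response.status.code)))
```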
1 |
+# Generated by the protocol buffer compiler. DO NOT EDIT!
|
|
2 |
+# source: google/rpc/code.proto
|
|
3 |
+ |
|
4 |
+import sys
|
|
5 |
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
|
|
6 |
+from google.protobuf.internal import enum_type_wrapper
|
|
7 |
+from google.protobuf import descriptor as _descriptor
|
|
8 |
+from google.protobuf import message as _message
|
|
9 |
+from google.protobuf import reflection as _reflection
|
|
10 |
+from google.protobuf import symbol_database as _symbol_database
|
|
11 |
+# @@protoc_insertion_point(imports)
|
|
12 |
+ |
|
13 |
+_sym_db = _symbol_database.Default()
|
|
14 |
+ |
|
15 |
+ |
|
16 |
+ |
|
17 |
+ |
|
18 |
+DESCRIPTOR = _descriptor.FileDescriptor(
|
|
19 |
+ name='google/rpc/code.proto',
|
|
20 |
+ package='google.rpc',
|
|
21 |
+ syntax='proto3',
|
|
22 |
+ serialized_options=_b('\n\016com.google.rpcB\tCodeProtoP\001Z3google.golang.org/genproto/googleapis/rpc/code;code\242\002\003RPC'),
|
|
23 |
+ serialized_pb=_b('\n\x15google/rpc/code.proto\x12\ngoogle.rpc*\xb7\x02\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\r\n\tCANCELLED\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x14\n\x10INVALID_ARGUMENT\x10\x03\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x04\x12\r\n\tNOT_FOUND\x10\x05\x12\x12\n\x0e\x41LREADY_EXISTS\x10\x06\x12\x15\n\x11PERMISSION_DENIED\x10\x07\x12\x13\n\x0fUNAUTHENTICATED\x10\x10\x12\x16\n\x12RESOURCE_EXHAUSTED\x10\x08\x12\x17\n\x13\x46\x41ILED_PRECONDITION\x10\t\x12\x0b\n\x07\x41\x42ORTED\x10\n\x12\x10\n\x0cOUT_OF_RANGE\x10\x0b\x12\x11\n\rUNIMPLEMENTED\x10\x0c\x12\x0c\n\x08INTERNAL\x10\r\x12\x0f\n\x0bUNAVAILABLE\x10\x0e\x12\r\n\tDATA_LOSS\x10\x0f\x42X\n\x0e\x63om.google.rpcB\tCodeProtoP\x01Z3google.golang.org/genproto/googleapis/rpc/code;code\xa2\x02\x03RPCb\x06proto3')
|
|
24 |
+)
|
|
25 |
+ |
|
26 |
+_CODE = _descriptor.EnumDescriptor(
|
|
27 |
+ name='Code',
|
|
28 |
+ full_name='google.rpc.Code',
|
|
29 |
+ filename=None,
|
|
30 |
+ file=DESCRIPTOR,
|
|
31 |
+ values=[
|
|
32 |
+ _descriptor.EnumValueDescriptor(
|
|
33 |
+ name='OK', index=0, number=0,
|
|
34 |
+ serialized_options=None,
|
|
35 |
+ type=None),
|
|
36 |
+ _descriptor.EnumValueDescriptor(
|
|
37 |
+ name='CANCELLED', index=1, number=1,
|
|
38 |
+ serialized_options=None,
|
|
39 |
+ type=None),
|
|
40 |
+ _descriptor.EnumValueDescriptor(
|
|
41 |
+ name='UNKNOWN', index=2, number=2,
|
|
42 |
+ serialized_options=None,
|
|
43 |
+ type=None),
|
|
44 |
+ _descriptor.EnumValueDescriptor(
|
|
45 |
+ name='INVALID_ARGUMENT', index=3, number=3,
|
|
46 |
+ serialized_options=None,
|
|
47 |
+ type=None),
|
|
48 |
+ _descriptor.EnumValueDescriptor(
|
|
49 |
+ name='DEADLINE_EXCEEDED', index=4, number=4,
|
|
50 |
+ serialized_options=None,
|
|
51 |
+ type=None),
|
|
52 |
+ _descriptor.EnumValueDescriptor(
|
|
53 |
+ name='NOT_FOUND', index=5, number=5,
|
|
54 |
+ serialized_options=None,
|
|
55 |
+ type=None),
|
|
56 |
+ _descriptor.EnumValueDescriptor(
|
|
57 |
+ name='ALREADY_EXISTS', index=6, number=6,
|
|
58 |
+ serialized_options=None,
|
|
59 |
+ type=None),
|
|
60 |
+ _descriptor.EnumValueDescriptor(
|
|
61 |
+ name='PERMISSION_DENIED', index=7, number=7,
|
|
62 |
+ serialized_options=None,
|
|
63 |
+ type=None),
|
|
64 |
+ _descriptor.EnumValueDescriptor(
|
|
65 |
+ name='UNAUTHENTICATED', index=8, number=16,
|
|
66 |
+ serialized_options=None,
|
|
67 |
+ type=None),
|
|
68 |
+ _descriptor.EnumValueDescriptor(
|
|
69 |
+ name='RESOURCE_EXHAUSTED', index=9, number=8,
|
|
70 |
+ serialized_options=None,
|
|
71 |
+ type=None),
|
|
72 |
+ _descriptor.EnumValueDescriptor(
|
|
73 |
+ name='FAILED_PRECONDITION', index=10, number=9,
|
|
74 |
+ serialized_options=None,
|
|
75 |
+ type=None),
|
|
76 |
+ _descriptor.EnumValueDescriptor(
|
|
77 |
+ name='ABORTED', index=11, number=10,
|
|
78 |
+ serialized_options=None,
|
|
79 |
+ type=None),
|
|
80 |
+ _descriptor.EnumValueDescriptor(
|
|
81 |
+ name='OUT_OF_RANGE', index=12, number=11,
|
|
82 |
+ serialized_options=None,
|
|
83 |
+ type=None),
|
|
84 |
+ _descriptor.EnumValueDescriptor(
|
|
85 |
+ name='UNIMPLEMENTED', index=13, number=12,
|
|
86 |
+ serialized_options=None,
|
|
87 |
+ type=None),
|
|
88 |
+ _descriptor.EnumValueDescriptor(
|
|
89 |
+ name='INTERNAL', index=14, number=13,
|
|
90 |
+ serialized_options=None,
|
|
91 |
+ type=None),
|
|
92 |
+ _descriptor.EnumValueDescriptor(
|
|
93 |
+ name='UNAVAILABLE', index=15, number=14,
|
|
94 |
+ serialized_options=None,
|
|
95 |
+ type=None),
|
|
96 |
+ _descriptor.EnumValueDescriptor(
|
|
97 |
+ name='DATA_LOSS', index=16, number=15,
|
|
98 |
+ serialized_options=None,
|
|
99 |
+ type=None),
|
|
100 |
+ ],
|
|
101 |
+ containing_type=None,
|
|
102 |
+ serialized_options=None,
|
|
103 |
+ serialized_start=38,
|
|
104 |
+ serialized_end=349,
|
|
105 |
+)
|
|
106 |
+_sym_db.RegisterEnumDescriptor(_CODE)
|
|
107 |
+ |
|
108 |
+Code = enum_type_wrapper.EnumTypeWrapper(_CODE)
|
|
109 |
+OK = 0
|
|
110 |
+CANCELLED = 1
|
|
111 |
+UNKNOWN = 2
|
|
112 |
+INVALID_ARGUMENT = 3
|
|
113 |
+DEADLINE_EXCEEDED = 4
|
|
114 |
+NOT_FOUND = 5
|
|
115 |
+ALREADY_EXISTS = 6
|
|
116 |
+PERMISSION_DENIED = 7
|
|
117 |
+UNAUTHENTICATED = 16
|
|
118 |
+RESOURCE_EXHAUSTED = 8
|
|
119 |
+FAILED_PRECONDITION = 9
|
|
120 |
+ABORTED = 10
|
|
121 |
+OUT_OF_RANGE = 11
|
|
122 |
+UNIMPLEMENTED = 12
|
|
123 |
+INTERNAL = 13
|
|
124 |
+UNAVAILABLE = 14
|
|
125 |
+DATA_LOSS = 15
|
|
126 |
+ |
|
127 |
+ |
|
128 |
+DESCRIPTOR.enum_types_by_name['Code'] = _CODE
|
|
129 |
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
|
|
130 |
+ |
|
131 |
+ |
|
132 |
+DESCRIPTOR._options = None
|
|
133 |
+# @@protoc_insertion_point(module_scope)
|
1 |
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
|
2 |
+import grpc
|
|
3 |
+ |
... | ... | @@ -17,7 +17,6 @@ |
17 | 17 |
# Tristan Daniël Maat <tristan maat codethink co uk>
|
18 | 18 |
#
|
19 | 19 |
from .job import Job
|
20 |
-from ..._platform import Platform
|
|
21 | 20 |
|
22 | 21 |
|
23 | 22 |
class CacheSizeJob(Job):
|
... | ... | @@ -25,8 +24,8 @@ class CacheSizeJob(Job): |
25 | 24 |
super().__init__(*args, **kwargs)
|
26 | 25 |
self._complete_cb = complete_cb
|
27 | 26 |
|
28 |
- platform = Platform.get_platform()
|
|
29 |
- self._artifacts = platform.artifactcache
|
|
27 |
+ context = self._scheduler.context
|
|
28 |
+ self._artifacts = context.artifactcache
|
|
30 | 29 |
|
31 | 30 |
def child_process(self):
|
32 | 31 |
return self._artifacts.compute_cache_size()
|
... | ... | @@ -17,15 +17,14 @@ |
17 | 17 |
# Tristan Daniël Maat <tristan maat codethink co uk>
|
18 | 18 |
#
|
19 | 19 |
from .job import Job
|
20 |
-from ..._platform import Platform
|
|
21 | 20 |
|
22 | 21 |
|
23 | 22 |
class CleanupJob(Job):
|
24 | 23 |
def __init__(self, *args, **kwargs):
|
25 | 24 |
super().__init__(*args, **kwargs)
|
26 | 25 |
|
27 |
- platform = Platform.get_platform()
|
|
28 |
- self._artifacts = platform.artifactcache
|
|
26 |
+ context = self._scheduler.context
|
|
27 |
+ self._artifacts = context.artifactcache
|
|
29 | 28 |
|
30 | 29 |
def child_process(self):
|
31 | 30 |
return self._artifacts.clean()
|
... | ... | @@ -24,7 +24,6 @@ from . import Queue, QueueStatus |
24 | 24 |
from ..jobs import ElementJob
|
25 | 25 |
from ..resources import ResourceType
|
26 | 26 |
from ..._message import MessageType
|
27 |
-from ..._platform import Platform
|
|
28 | 27 |
|
29 | 28 |
|
30 | 29 |
# A queue which assembles elements
|
... | ... | @@ -94,8 +93,8 @@ class BuildQueue(Queue): |
94 | 93 |
# as returned from Element._assemble() to the estimated
|
95 | 94 |
# artifact cache size
|
96 | 95 |
#
|
97 |
- platform = Platform.get_platform()
|
|
98 |
- artifacts = platform.artifactcache
|
|
96 |
+ context = self._scheduler.context
|
|
97 |
+ artifacts = context.artifactcache
|
|
99 | 98 |
|
100 | 99 |
artifacts.add_artifact_size(artifact_size)
|
101 | 100 |
|
... | ... | @@ -29,7 +29,6 @@ from contextlib import contextmanager |
29 | 29 |
# Local imports
|
30 | 30 |
from .resources import Resources, ResourceType
|
31 | 31 |
from .jobs import CacheSizeJob, CleanupJob
|
32 |
-from .._platform import Platform
|
|
33 | 32 |
|
34 | 33 |
|
35 | 34 |
# A decent return code for Scheduler.run()
|
... | ... | @@ -348,8 +347,8 @@ class Scheduler(): |
348 | 347 |
# which will report the calculated cache size.
|
349 | 348 |
#
|
350 | 349 |
def _run_cleanup(self, cache_size):
|
351 |
- platform = Platform.get_platform()
|
|
352 |
- artifacts = platform.artifactcache
|
|
350 |
+ context = self.context
|
|
351 |
+ artifacts = context.artifactcache
|
|
353 | 352 |
|
354 | 353 |
if not artifacts.has_quota_exceeded():
|
355 | 354 |
return
|
... | ... | @@ -32,7 +32,6 @@ from ._exceptions import StreamError, ImplError, BstError, set_last_task_error |
32 | 32 |
from ._message import Message, MessageType
|
33 | 33 |
from ._scheduler import Scheduler, SchedStatus, TrackQueue, FetchQueue, BuildQueue, PullQueue, PushQueue
|
34 | 34 |
from ._pipeline import Pipeline, PipelineSelection
|
35 |
-from ._platform import Platform
|
|
36 | 35 |
from . import utils, _yaml, _site
|
37 | 36 |
from . import Scope, Consistency
|
38 | 37 |
|
... | ... | @@ -71,8 +70,7 @@ class Stream(): |
71 | 70 |
#
|
72 | 71 |
# Private members
|
73 | 72 |
#
|
74 |
- self._platform = Platform.get_platform()
|
|
75 |
- self._artifacts = self._platform.artifactcache
|
|
73 |
+ self._artifacts = context.artifactcache
|
|
76 | 74 |
self._context = context
|
77 | 75 |
self._project = project
|
78 | 76 |
self._pipeline = Pipeline(context, project, self._artifacts)
|
... | ... | @@ -246,15 +246,23 @@ class Element(Plugin): |
246 | 246 |
self.__config = self.__extract_config(meta)
|
247 | 247 |
self._configure(self.__config)
|
248 | 248 |
|
249 |
- # Extract Sandbox config
|
|
250 |
- self.__sandbox_config = self.__extract_sandbox_config(meta)
|
|
251 |
- |
|
252 | 249 |
# Extract remote execution URL
|
253 | 250 |
if not self.__is_junction:
|
254 | 251 |
self.__remote_execution_url = project.remote_execution_url
|
255 | 252 |
else:
|
256 | 253 |
self.__remote_execution_url = None
|
257 | 254 |
|
255 |
+ # Extract Sandbox config
|
|
256 |
+ self.__sandbox_config = self.__extract_sandbox_config(meta)
|
|
257 |
+ |
|
258 |
+ self.__sandbox_config_supported = True
|
|
259 |
+ if not self.__use_remote_execution():
|
|
260 |
+ platform = Platform.get_platform()
|
|
261 |
+ if not platform.check_sandbox_config(self.__sandbox_config):
|
|
262 |
+ # Local sandbox does not fully support specified sandbox config.
|
|
263 |
+ # This will taint the artifact, disable pushing.
|
|
264 |
+ self.__sandbox_config_supported = False
|
|
265 |
+ |
|
258 | 266 |
def __lt__(self, other):
|
259 | 267 |
return self.name < other.name
|
260 | 268 |
|
... | ... | @@ -1521,6 +1529,11 @@ class Element(Plugin): |
1521 | 1529 |
context = self._get_context()
|
1522 | 1530 |
with self._output_file() as output_file:
|
1523 | 1531 |
|
1532 |
+ if not self.__sandbox_config_supported:
|
|
1533 |
+ self.warn("Sandbox configuration is not supported by the platform.",
|
|
1534 |
+ detail="Falling back to UID {} GID {}. Artifact will not be pushed."
|
|
1535 |
+ .format(self.__sandbox_config.build_uid, self.__sandbox_config.build_gid))
|
|
1536 |
+ |
|
1524 | 1537 |
# Explicitly clean it up, keep the build dir around if exceptions are raised
|
1525 | 1538 |
os.makedirs(context.builddir, exist_ok=True)
|
1526 | 1539 |
rootdir = tempfile.mkdtemp(prefix="{}-".format(self.normal_name), dir=context.builddir)
|
... | ... | @@ -1532,8 +1545,6 @@ class Element(Plugin): |
1532 | 1545 |
with _signals.terminator(cleanup_rootdir), \
|
1533 | 1546 |
self.__sandbox(rootdir, output_file, output_file, self.__sandbox_config) as sandbox: # nopep8
|
1534 | 1547 |
|
1535 |
- sandbox_vroot = sandbox.get_virtual_directory()
|
|
1536 |
- |
|
1537 | 1548 |
# By default, the dynamic public data is the same as the static public data.
|
1538 | 1549 |
# The plugin's assemble() method may modify this, though.
|
1539 | 1550 |
self.__dynamic_public = _yaml.node_copy(self.__public)
|
... | ... | @@ -1581,7 +1592,6 @@ class Element(Plugin): |
1581 | 1592 |
finally:
|
1582 | 1593 |
if collect is not None:
|
1583 | 1594 |
try:
|
1584 |
- # Sandbox will probably have replaced its virtual directory, so get it again
|
|
1585 | 1595 |
sandbox_vroot = sandbox.get_virtual_directory()
|
1586 | 1596 |
collectvdir = sandbox_vroot.descend(collect.lstrip(os.sep).split(os.sep))
|
1587 | 1597 |
except VirtualDirectoryError:
|
... | ... | @@ -1606,6 +1616,7 @@ class Element(Plugin): |
1606 | 1616 |
collectvdir.export_files(filesdir, can_link=True)
|
1607 | 1617 |
|
1608 | 1618 |
try:
|
1619 |
+ sandbox_vroot = sandbox.get_virtual_directory()
|
|
1609 | 1620 |
sandbox_build_dir = sandbox_vroot.descend(
|
1610 | 1621 |
self.get_variable('build-root').lstrip(os.sep).split(os.sep))
|
1611 | 1622 |
# Hard link files from build-root dir to buildtreedir directory
|
... | ... | @@ -2084,7 +2095,7 @@ class Element(Plugin): |
2084 | 2095 |
#
|
2085 | 2096 |
# Raises an error if the artifact is not cached.
|
2086 | 2097 |
#
|
2087 |
- def __assert_cached(self, keystrength=_KeyStrength.STRONG):
|
|
2098 |
+ def __assert_cached(self, keystrength=None):
|
|
2088 | 2099 |
assert self.__is_cached(keystrength=keystrength), "{}: Missing artifact {}".format(
|
2089 | 2100 |
self, self._get_brief_display_key())
|
2090 | 2101 |
|
... | ... | @@ -2112,10 +2123,19 @@ class Element(Plugin): |
2112 | 2123 |
workspaced_dependencies = self.__get_artifact_metadata_workspaced_dependencies()
|
2113 | 2124 |
|
2114 | 2125 |
# Other conditions should be or-ed
|
2115 |
- self.__tainted = workspaced or workspaced_dependencies
|
|
2126 |
+ self.__tainted = (workspaced or workspaced_dependencies or
|
|
2127 |
+ not self.__sandbox_config_supported)
|
|
2116 | 2128 |
|
2117 | 2129 |
return self.__tainted
|
2118 | 2130 |
|
2131 |
+ # __use_remote_execution():
|
|
2132 |
+ #
|
|
2133 |
+ # Returns True if remote execution is configured and the element plugin
|
|
2134 |
+ # supports it.
|
|
2135 |
+ #
|
|
2136 |
+ def __use_remote_execution(self):
|
|
2137 |
+ return self.__remote_execution_url and self.BST_VIRTUAL_DIRECTORY
|
|
2138 |
+ |
|
2119 | 2139 |
# __sandbox():
|
2120 | 2140 |
#
|
2121 | 2141 |
# A context manager to prepare a Sandbox object at the specified directory,
|
@@ -2137,9 +2157,7 @@ class Element(Plugin):
         project = self._get_project()
         platform = Platform.get_platform()
 
-        if (directory is not None and
-                self.__remote_execution_url and
-                self.BST_VIRTUAL_DIRECTORY):
+        if directory is not None and self.__use_remote_execution():
 
             self.info("Using a remote sandbox for artifact {} with directory '{}'".format(self.name, directory))
 
@@ -205,7 +205,17 @@ class GitMirror(SourceFetcher):
             [self.source.host_git, 'rev-parse', tracking],
             fail="Unable to find commit for specified branch name '{}'".format(tracking),
             cwd=self.mirror)
-        return output.rstrip('\n')
+        ref = output.rstrip('\n')
+
+        # Prefix the ref with the closest annotated tag, if available,
+        # to make the ref human readable
+        exit_code, output = self.source.check_output(
+            [self.source.host_git, 'describe', '--tags', '--abbrev=40', '--long', ref],
+            cwd=self.mirror)
+        if exit_code == 0:
+            ref = output.rstrip('\n')
+
+        return ref
 
     def stage(self, directory, track=None):
         fullpath = os.path.join(directory, self.path)
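For illustration, a minimal sketch of the ref format this tracking change produces, assuming a local mirror checkout at a made-up path with at least one tag: with --abbrev=40 the describe output ends in the full commit hash, so git can still resolve the prefixed ref back to the same commit.

    # Sketch only: the mirror path, branch and tag names are placeholders.
    import subprocess

    MIRROR = '/path/to/mirror'

    sha = subprocess.check_output(
        ['git', 'rev-parse', 'master'], cwd=MIRROR).decode().strip()
    out = subprocess.check_output(
        ['git', 'describe', '--tags', '--abbrev=40', '--long', sha],
        cwd=MIRROR).decode().strip()
    # 'out' looks like "1.2.0-3-g<40 hex digits>"; the trailing -g<sha>
    # component is what lets git resolve it back to the plain commit:
    resolved = subprocess.check_output(
        ['git', 'rev-parse', out + '^{commit}'], cwd=MIRROR).decode().strip()
    assert resolved == sha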
@@ -27,7 +27,7 @@ from . import Sandbox
 from ..storage._filebaseddirectory import FileBasedDirectory
 from ..storage._casbaseddirectory import CasBasedDirectory
 from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
-from .._platform import Platform
+from .._protos.google.rpc import code_pb2
 
 
 class SandboxError(Exception):
@@ -71,8 +71,8 @@ class SandboxRemote(Sandbox):
             output_files=[],
             output_directories=[self._output_directory],
             platform=None)
-        platform = Platform.get_platform()
-        cascache = platform.artifactcache
+        context = self._get_context()
+        cascache = context.artifactcache
         # Upload the Command message to the remote CAS server
         command_digest = cascache.push_message(self._get_project(), remote_command)
         if not command_digest or not cascache.verify_digest_pushed(self._get_project(), command_digest):
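The same substitution — taking the CAS handle from the Context rather than from the Platform singleton or a freshly constructed CASCache — recurs in the hunks below. A minimal sketch of the new pattern (the artifact directory path is a placeholder):

    from buildstream._context import Context

    context = Context()
    context.artifactdir = '/path/to/artifacts'   # placeholder location

    # The context owns a single shared CAS-based artifact cache handle
    cas = context.artifactcache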
@@ -134,8 +134,8 @@ class SandboxRemote(Sandbox):
         if tree_digest is None or not tree_digest.hash:
             raise SandboxError("Output directory structure had no digest attached.")
 
-        platform = Platform.get_platform()
-        cascache = platform.artifactcache
+        context = self._get_context()
+        cascache = context.artifactcache
         # Now do a pull to ensure we have the necessary parts.
         dir_digest = cascache.pull_tree(self._get_project(), tree_digest)
         if dir_digest is None or not dir_digest.hash or not dir_digest.size_bytes:
@@ -170,8 +170,8 @@ class SandboxRemote(Sandbox):
 
         upload_vdir.recalculate_hash()
 
-        platform = Platform.get_platform()
-        cascache = platform.artifactcache
+        context = self._get_context()
+        cascache = context.artifactcache
         # Now, push that key (without necessarily needing a ref) to the remote.
         cascache.push_directory(self._get_project(), upload_vdir)
         if not cascache.verify_digest_pushed(self._get_project(), upload_vdir.ref):
207 | 207 |
|
208 | 208 |
operation.response.Unpack(execution_response)
|
209 | 209 |
|
210 |
- if execution_response.status.code != 0:
|
|
211 |
- # A normal error during the build: the remote execution system
|
|
212 |
- # has worked correctly but the command failed.
|
|
213 |
- # execution_response.error also contains 'message' (str) and
|
|
214 |
- # 'details' (iterator of Any) which we ignore at the moment.
|
|
215 |
- return execution_response.status.code
|
|
210 |
+ if execution_response.status.code != code_pb2.OK:
|
|
211 |
+ # An unexpected error during execution: the remote execution
|
|
212 |
+ # system failed at processing the execution request.
|
|
213 |
+ if execution_response.status.message:
|
|
214 |
+ raise SandboxError(execution_response.status.message)
|
|
215 |
+ else:
|
|
216 |
+ raise SandboxError("Remote server failed at executing the build request.")
|
|
216 | 217 |
|
217 | 218 |
action_result = execution_response.result
|
218 | 219 |
|
220 |
+ if action_result.exit_code != 0:
|
|
221 |
+ # A normal error during the build: the remote execution system
|
|
222 |
+ # has worked correctly but the command failed.
|
|
223 |
+ # action_result.stdout and action_result.stderr also contains
|
|
224 |
+ # build command outputs which we ignore at the moment.
|
|
225 |
+ return action_result.exit_code
|
|
226 |
+ |
|
219 | 227 |
self.process_job_output(action_result.output_directories, action_result.output_files)
|
220 | 228 |
|
221 | 229 |
return 0
|
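A hedged sketch of what this split means for callers (the names here are illustrative, not the real call sites): a SandboxError now signals that the remote execution service itself failed, while a failed build command simply surfaces as a non-zero exit code.

    class SandboxError(Exception):
        """Stand-in for the SandboxError defined in _sandboxremote.py."""

    def run_remote_command(sandbox, command):
        # Hypothetical wrapper; 'sandbox' stands for a SandboxRemote instance
        try:
            exit_code = sandbox.run(command, 0)
        except SandboxError as err:
            # Infrastructure problem: the execution request itself failed
            print("remote execution error: {}".format(err))
            return None
        if exit_code != 0:
            # Ordinary build failure: the command ran remotely but returned non-zero
            print("command failed with exit code {}".format(exit_code))
        return exit_code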
@@ -110,6 +110,10 @@ class Sandbox():
             os.makedirs(directory_, exist_ok=True)
         self._vdir = None
 
+        # This is set if anyone requests access to the underlying
+        # directory via get_directory.
+        self._never_cache_vdirs = False
+
     def get_directory(self):
         """Fetches the sandbox root directory
 
@@ -122,24 +126,28 @@ class Sandbox():
 
         """
         if self.__allow_real_directory:
+            self._never_cache_vdirs = True
             return self._root
         else:
             raise BstError("You can't use get_directory")
 
     def get_virtual_directory(self):
-        """Fetches the sandbox root directory
+        """Fetches the sandbox root directory as a virtual Directory.
 
         The root directory is where artifacts for the base
-        runtime environment should be staged. Only works if
-        BST_VIRTUAL_DIRECTORY is not set.
+        runtime environment should be staged.
+
+        Use caution if you use get_directory and
+        get_virtual_directory. If you alter the contents of the
+        directory returned by get_directory, all objects returned by
+        get_virtual_directory or derived from them are invalid and you
+        must call get_virtual_directory again to get a new copy.
 
         Returns:
-            (str): The sandbox root directory
+            (Directory): The sandbox root directory
 
         """
-        if not self._vdir:
-            # BST_CAS_DIRECTORIES is a deliberately hidden environment variable which
-            # can be used to switch on CAS-based directories for testing.
+        if self._vdir is None or self._never_cache_vdirs:
            if 'BST_CAS_DIRECTORIES' in os.environ:
                 self._vdir = CasBasedDirectory(self.__context, ref=None)
             else:
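A short usage sketch of the caution added to the docstring, assuming a Sandbox instance that allows real directory access: after writing through get_directory(), any previously fetched virtual view must be discarded and fetched again.

    import os

    def add_marker(sandbox):
        vdir = sandbox.get_virtual_directory()      # virtual view of the sandbox root

        rootdir = sandbox.get_directory()           # direct filesystem path
        with open(os.path.join(rootdir, 'marker.txt'), 'w') as f:
            f.write('modified behind the virtual directory')

        # 'vdir' is now stale; fetch a fresh view before using it again
        vdir = sandbox.get_virtual_directory()
        return vdir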
@@ -38,7 +38,6 @@ from .._exceptions import BstError
 from .directory import Directory, VirtualDirectoryError
 from ._filebaseddirectory import FileBasedDirectory
 from ..utils import FileListResult, safe_copy, list_relative_paths
-from .._artifactcache.cascache import CASCache
 
 
 class IndexEntry():
@@ -80,7 +79,7 @@ class CasBasedDirectory(Directory):
         self.filename = filename
         self.common_name = common_name
         self.pb2_directory = remote_execution_pb2.Directory()
-        self.cas_cache = CASCache(context)
+        self.cas_cache = context.artifactcache
         if ref:
             with open(self.cas_cache.objpath(ref), 'rb') as f:
                 self.pb2_directory.ParseFromString(f.read())
@@ -6,7 +6,6 @@ import signal
 import pytest
 
 from buildstream import _yaml, _signals, utils
-from buildstream._artifactcache.cascache import CASCache
 from buildstream._context import Context
 from buildstream._project import Project
 from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
@@ -88,7 +87,7 @@ def test_pull(cli, tmpdir, datafiles):
     # Load the project and CAS cache
     project = Project(project_dir, context)
     project.ensure_fully_loaded()
-    cas = CASCache(context)
+    cas = context.artifactcache
 
     # Assert that the element's artifact is **not** cached
     element = project.load_elements(['target.bst'], cas)[0]
@@ -130,7 +129,7 @@ def _test_pull(user_config_file, project_dir, artifact_dir,
     project.ensure_fully_loaded()
 
     # Create a local CAS cache handle
-    cas = CASCache(context)
+    cas = context.artifactcache
 
     # Load the target element
     element = project.load_elements([element_name], cas)[0]
@@ -191,7 +190,7 @@ def test_pull_tree(cli, tmpdir, datafiles):
     # Load the project and CAS cache
     project = Project(project_dir, context)
     project.ensure_fully_loaded()
-    cas = CASCache(context)
+    cas = context.artifactcache
 
     # Assert that the element's artifact is cached
     element = project.load_elements(['target.bst'], cas)[0]
@@ -269,7 +268,7 @@ def _test_push_tree(user_config_file, project_dir, artifact_dir, artifact_digest
     project.ensure_fully_loaded()
 
     # Create a local CAS cache handle
-    cas = CASCache(context)
+    cas = context.artifactcache
 
     # Manually setup the CAS remote
     cas.setup_remotes(use_config=True)
@@ -304,7 +303,7 @@ def _test_pull_tree(user_config_file, project_dir, artifact_dir, artifact_digest
     project.ensure_fully_loaded()
 
     # Create a local CAS cache handle
-    cas = CASCache(context)
+    cas = context.artifactcache
 
     # Manually setup the CAS remote
     cas.setup_remotes(use_config=True)
@@ -6,7 +6,6 @@ import pytest
 
 from pluginbase import PluginBase
 from buildstream import _yaml, _signals, utils
-from buildstream._artifactcache.cascache import CASCache
 from buildstream._context import Context
 from buildstream._project import Project
 from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
@@ -67,7 +66,7 @@ def test_push(cli, tmpdir, datafiles):
     project.ensure_fully_loaded()
 
     # Create a local CAS cache handle
-    cas = CASCache(context)
+    cas = context.artifactcache
 
     # Assert that the element's artifact is cached
     element = project.load_elements(['target.bst'], cas)[0]
@@ -109,7 +108,7 @@ def _test_push(user_config_file, project_dir, artifact_dir,
     project.ensure_fully_loaded()
 
     # Create a local CAS cache handle
-    cas = CASCache(context)
+    cas = context.artifactcache
 
     # Load the target element
     element = project.load_elements([element_name], cas)[0]
@@ -166,7 +165,7 @@ def test_push_directory(cli, tmpdir, datafiles):
     # Load the project and CAS cache
     project = Project(project_dir, context)
     project.ensure_fully_loaded()
-    cas = CASCache(context)
+    cas = context.artifactcache
 
     # Assert that the element's artifact is cached
     element = project.load_elements(['target.bst'], cas)[0]
@@ -217,7 +216,7 @@ def _test_push_directory(user_config_file, project_dir, artifact_dir, artifact_d
     project.ensure_fully_loaded()
 
     # Create a local CAS cache handle
-    cas = CASCache(context)
+    cas = context.artifactcache
 
     # Manually setup the CAS remote
     cas.setup_remotes(use_config=True)
@@ -292,7 +291,7 @@ def _test_push_message(user_config_file, project_dir, artifact_dir, queue):
     project.ensure_fully_loaded()
 
     # Create a local CAS cache handle
-    cas = CASCache(context)
+    cas = context.artifactcache
 
     # Manually setup the CAS remote
     cas.setup_remotes(use_config=True)
@@ -0,0 +1,4 @@
+kind: compose
+
+build-depends:
+- target.bst
@@ -0,0 +1,36 @@
+import os
+import pytest
+from tests.testutils import cli
+
+# Project directory
+DATA_DIR = os.path.join(
+    os.path.dirname(os.path.realpath(__file__)),
+    "project",
+)
+
+
+def strict_args(args, strict):
+    if strict != "strict":
+        return ['--no-strict'] + args
+    return args
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("strict", ["strict", "non-strict"])
+def test_rebuild(datafiles, cli, strict):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    checkout = os.path.join(cli.directory, 'checkout')
+
+    # First build intermediate target.bst
+    result = cli.run(project=project, args=strict_args(['build', 'target.bst'], strict))
+    result.assert_success()
+
+    # Modify base import
+    with open(os.path.join(project, 'files', 'dev-files', 'usr', 'include', 'new.h'), "w") as f:
+        f.write("#define NEW")
+
+    # Rebuild base import and build top-level rebuild-target.bst
+    # In non-strict mode, this does not rebuild intermediate target.bst,
+    # which means that a weakly cached target.bst will be staged as dependency.
+    result = cli.run(project=project, args=strict_args(['build', 'rebuild-target.bst'], strict))
+    result.assert_success()
@@ -476,3 +476,48 @@ def test_ref_not_in_track_warn_error(cli, tmpdir, datafiles):
     result = cli.run(project=project, args=['build', 'target.bst'])
     result.assert_main_error(ErrorDomain.STREAM, None)
     result.assert_task_error(ErrorDomain.PLUGIN, CoreWarnings.REF_NOT_IN_TRACK)
+
+
+@pytest.mark.skipif(HAVE_GIT is False, reason="git is not available")
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'template'))
+@pytest.mark.parametrize("tag,extra_commit", [(False, False), (True, False), (True, True)])
+def test_track_fetch(cli, tmpdir, datafiles, tag, extra_commit):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+
+    # Create the repo from 'repofiles' subdir
+    repo = create_repo('git', str(tmpdir))
+    ref = repo.create(os.path.join(project, 'repofiles'))
+    if tag:
+        repo.add_tag('tag')
+    if extra_commit:
+        repo.add_commit()
+
+    # Write out our test target
+    element = {
+        'kind': 'import',
+        'sources': [
+            repo.source_config()
+        ]
+    }
+    element_path = os.path.join(project, 'target.bst')
+    _yaml.dump(element, element_path)
+
+    # Track it
+    result = cli.run(project=project, args=['track', 'target.bst'])
+    result.assert_success()
+
+    element = _yaml.load(element_path)
+    new_ref = element['sources'][0]['ref']
+
+    if tag:
+        # Check and strip prefix
+        prefix = 'tag-{}-g'.format(0 if not extra_commit else 1)
+        assert new_ref.startswith(prefix)
+        new_ref = new_ref[len(prefix):]
+
+    # 40 chars for SHA-1
+    assert len(new_ref) == 40
+
+    # Fetch it
+    result = cli.run(project=project, args=['fetch', 'target.bst'])
+    result.assert_success()
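As a worked example of the prefix check above (the SHA is a placeholder): when the tag sits on the tracked commit, git describe reports zero commits since the tag, so the tracked ref starts with 'tag-0-g'; with one extra commit on the branch it starts with 'tag-1-g', hence the 0-or-1 in the format string.

    # Illustrative values only; the 40-character SHA-1 is a placeholder.
    for extra_commit, expected in [(False, 'tag-0-g'), (True, 'tag-1-g')]:
        prefix = 'tag-{}-g'.format(0 if not extra_commit else 1)
        assert prefix == expected

    new_ref = 'tag-1-g' + 'a' * 40
    assert len(new_ref[len('tag-1-g'):]) == 40   # what the test strips and length-checks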
@@ -11,7 +11,6 @@ from multiprocessing import Process, Queue
 import pytest_cov
 
 from buildstream import _yaml
-from buildstream._artifactcache.cascache import CASCache
 from buildstream._artifactcache.casserver import create_server
 from buildstream._context import Context
 from buildstream._exceptions import ArtifactError
@@ -49,7 +48,7 @@ class ArtifactShare():
         context = Context()
         context.artifactdir = self.repodir
 
-        self.cas = CASCache(context)
+        self.cas = context.artifactcache
 
         self.total_space = total_space
         self.free_space = free_space
@@ -42,6 +42,9 @@ class Git(Repo):
         self._run_git('commit', '-m', 'Initial commit')
         return self.latest_commit()
 
+    def add_tag(self, tag):
+        self._run_git('tag', tag)
+
     def add_commit(self):
         self._run_git('commit', '--allow-empty', '-m', 'Additional commit')
         return self.latest_commit()