Chandan Singh pushed to branch chandan/bst-checkout-build at BuildStream / buildstream
Commits:
- b199afe6 by Jürg Billeter at 2018-09-25T09:01:51Z
- 7d199322 by Jürg Billeter at 2018-09-25T09:01:51Z
- e2e24015 by Jürg Billeter at 2018-09-25T09:01:51Z
- 697d10f2 by Jürg Billeter at 2018-09-25T09:01:51Z
- 81c51dbf by Jürg Billeter at 2018-09-25T09:31:55Z
- 2df7d140 by Jim MacArthur at 2018-09-25T10:36:37Z
- 62f59382 by Jim MacArthur at 2018-09-25T10:36:37Z
- 8cea7b17 by Jim MacArthur at 2018-09-25T10:58:40Z
- 83ab183e by Tiago Gomes at 2018-09-25T16:37:30Z
- c8594806 by Tiago Gomes at 2018-09-25T17:03:55Z
- b8ad3707 by Chandan Singh at 2018-09-25T23:47:34Z
- 7d88fff1 by Chandan Singh at 2018-09-25T23:47:44Z
10 changed files:
- .gitlab-ci.yml
- buildstream/_artifactcache/cascache.py
- buildstream/_artifactcache/casserver.py
- buildstream/_frontend/cli.py
- buildstream/element.py
- buildstream/element_enums.py
- buildstream/sandbox/sandbox.py
- tests/frontend/buildcheckout.py
- + tests/frontend/project/elements/checkout-deps.bst
- + tests/frontend/project/files/etc-files/etc/buildstream/config
Changes:
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -79,6 +79,8 @@ source_dist:
   - cd ../..
   - mkdir -p coverage-linux/
   - cp dist/buildstream/.coverage.* coverage-linux/coverage."${CI_JOB_NAME}"
+  except:
+  - schedules
   artifacts:
     paths:
     - coverage-linux/
@@ -127,6 +129,8 @@ tests-unix:
   - cd ../..
   - mkdir -p coverage-unix/
   - cp dist/buildstream/.coverage.* coverage-unix/coverage.unix
+  except:
+  - schedules
   artifacts:
     paths:
     - coverage-unix/
@@ -148,10 +152,41 @@ docs:
   - make BST_FORCE_SESSION_REBUILD=1 -C doc
   - cd ../..
   - mv dist/buildstream/doc/build/html public
+  except:
+  - schedules
   artifacts:
     paths:
     - public/
 
+.overnight-tests: &overnight-tests-template
+  stage: test
+  variables:
+    bst_ext_url: git+https://gitlab.com/BuildStream/bst-external.git
+    bst_ext_ref: 1d6ab71151b93c8cbc0a91a36ffe9270f3b835f1 # 0.5.1
+    fd_sdk_ref: 718ea88089644a1ea5b488de0b90c2c565cb75f8 # 18.08.12
+  before_script:
+  - (cd dist && ./unpack.sh && cd buildstream && pip3 install .)
+  - pip3 install --user -e ${bst_ext_url}@${bst_ext_ref}#egg=bst_ext
+  - git clone https://gitlab.com/freedesktop-sdk/freedesktop-sdk.git
+  - git -C freedesktop-sdk checkout ${fd_sdk_ref}
+  only:
+  - schedules
+
+overnight-tests:
+  <<: *overnight-tests-template
+  script:
+  - make -C freedesktop-sdk
+  tags:
+  - overnight-tests
+
+overnight-tests-no-cache:
+  <<: *overnight-tests-template
+  script:
+  - sed -i '/artifacts:/,+1 d' freedesktop-sdk/bootstrap/project.conf
+  - sed -i '/artifacts:/,+1 d' freedesktop-sdk/project.conf
+  - make -C freedesktop-sdk
+  tags:
+  - overnight-tests
 
 # Check code quality with gitlab's built-in feature.
 #
@@ -170,6 +205,8 @@ code_quality:
     --volume "$PWD":/code
     --volume /var/run/docker.sock:/var/run/docker.sock
     "registry.gitlab.com/gitlab-org/security-products/codequality:$SP_VERSION" /code
+  except:
+  - schedules
   artifacts:
     paths: [gl-code-quality-report.json]
 
@@ -199,6 +236,8 @@ analysis:
     radon raw -s -j buildstream > analysis/raw.json
     radon raw -s buildstream
 
+  except:
+  - schedules
   artifacts:
     paths:
     - analysis/
@@ -224,6 +263,8 @@ coverage:
   - tests-fedora-28
   - tests-unix
   - source_dist
+  except:
+  - schedules
 
 # Deploy, only for merges which land on master branch.
 #
@@ -248,3 +289,5 @@ pages:
   # See https://gitlab.com/gitlab-org/gitlab-ce/issues/35141
   #
   - master
+  except:
+  - schedules
--- a/buildstream/_artifactcache/cascache.py
+++ b/buildstream/_artifactcache/cascache.py
@@ -44,6 +44,11 @@ from .._exceptions import ArtifactError
 from . import ArtifactCache
 
 
+# The default limit for gRPC messages is 4 MiB.
+# Limit payload to 1 MiB to leave sufficient headroom for metadata.
+_MAX_PAYLOAD_BYTES = 1024 * 1024
+
+
 # A CASCache manages artifacts in a CAS repository as specified in the
 # Remote Execution API.
 #
@@ -854,6 +859,80 @@ class CASCache(ArtifactCache):
 
         assert digest.size_bytes == os.fstat(stream.fileno()).st_size
 
+    # _ensure_blob():
+    #
+    # Fetch and add blob if it's not already local.
+    #
+    # Args:
+    #     remote (Remote): The remote to use.
+    #     digest (Digest): Digest object for the blob to fetch.
+    #
+    # Returns:
+    #     (str): The path of the object
+    #
+    def _ensure_blob(self, remote, digest):
+        objpath = self.objpath(digest)
+        if os.path.exists(objpath):
+            # already in local repository
+            return objpath
+
+        with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
+            self._fetch_blob(remote, digest, f)
+
+            added_digest = self.add_object(path=f.name)
+            assert added_digest.hash == digest.hash
+
+        return objpath
+
+    def _batch_download_complete(self, batch):
+        for digest, data in batch.send():
+            with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
+                f.write(data)
+                f.flush()
+
+                added_digest = self.add_object(path=f.name)
+                assert added_digest.hash == digest.hash
+
+    # Helper function for _fetch_directory().
+    def _fetch_directory_batch(self, remote, batch, fetch_queue, fetch_next_queue):
+        self._batch_download_complete(batch)
+
+        # All previously scheduled directories are now locally available,
+        # move them to the processing queue.
+        fetch_queue.extend(fetch_next_queue)
+        fetch_next_queue.clear()
+        return _CASBatchRead(remote)
+
+    # Helper function for _fetch_directory().
+    def _fetch_directory_node(self, remote, digest, batch, fetch_queue, fetch_next_queue, *, recursive=False):
+        in_local_cache = os.path.exists(self.objpath(digest))
+
+        if in_local_cache:
+            # Skip download, already in local cache.
+            pass
+        elif (digest.size_bytes >= remote.max_batch_total_size_bytes or
+                not remote.batch_read_supported):
+            # Too large for batch request, download in independent request.
+            self._ensure_blob(remote, digest)
+            in_local_cache = True
+        else:
+            if not batch.add(digest):
+                # Not enough space left in batch request.
+                # Complete pending batch first.
+                batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
+                batch.add(digest)
+
+        if recursive:
+            if in_local_cache:
+                # Add directory to processing queue.
+                fetch_queue.append(digest)
+            else:
+                # Directory will be available after completing pending batch.
+                # Add directory to deferred processing queue.
+                fetch_next_queue.append(digest)
+
+        return batch
+
     # _fetch_directory():
     #
     # Fetches remote directory and adds it to content addressable store.
@@ -867,39 +946,32 @@ class CASCache(ArtifactCache):
     # dir_digest (Digest): Digest object for the directory to fetch.
     #
     def _fetch_directory(self, remote, dir_digest):
-        objpath = self.objpath(dir_digest)
-        if os.path.exists(objpath):
-            # already in local cache
-            return
-
-        with tempfile.NamedTemporaryFile(dir=self.tmpdir) as out:
-            self._fetch_blob(remote, dir_digest, out)
-
-            directory = remote_execution_pb2.Directory()
+        fetch_queue = [dir_digest]
+        fetch_next_queue = []
+        batch = _CASBatchRead(remote)
 
-            with open(out.name, 'rb') as f:
-                directory.ParseFromString(f.read())
+        while len(fetch_queue) + len(fetch_next_queue) > 0:
+            if len(fetch_queue) == 0:
+                batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
 
-            for filenode in directory.files:
-                fileobjpath = self.objpath(filenode.digest)
-                if os.path.exists(fileobjpath):
-                    # already in local cache
-                    continue
+            dir_digest = fetch_queue.pop(0)
 
-                with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
-                    self._fetch_blob(remote, filenode.digest, f)
+            objpath = self._ensure_blob(remote, dir_digest)
 
-                    digest = self.add_object(path=f.name)
-                    assert digest.hash == filenode.digest.hash
+            directory = remote_execution_pb2.Directory()
+            with open(objpath, 'rb') as f:
+                directory.ParseFromString(f.read())
 
             for dirnode in directory.directories:
-                self._fetch_directory(remote, dirnode.digest)
+                batch = self._fetch_directory_node(remote, dirnode.digest, batch,
+                                                   fetch_queue, fetch_next_queue, recursive=True)
+
+            for filenode in directory.files:
+                batch = self._fetch_directory_node(remote, filenode.digest, batch,
+                                                   fetch_queue, fetch_next_queue)
 
-            # Place directory blob only in final location when we've
-            # downloaded all referenced blobs to avoid dangling
-            # references in the repository.
-            digest = self.add_object(path=out.name)
-            assert digest.hash == dir_digest.hash
+        # Fetch final batch
+        self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
 
     def _fetch_tree(self, remote, digest):
         # download but do not store the Tree object
@@ -914,16 +986,7 @@ class CASCache(ArtifactCache):
         tree.children.extend([tree.root])
         for directory in tree.children:
             for filenode in directory.files:
-                fileobjpath = self.objpath(filenode.digest)
-                if os.path.exists(fileobjpath):
-                    # already in local cache
-                    continue
-
-                with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
-                    self._fetch_blob(remote, filenode.digest, f)
-
-                    added_digest = self.add_object(path=f.name)
-                    assert added_digest.hash == filenode.digest.hash
+                self._ensure_blob(remote, filenode.digest)
 
         # place directory blob only in final location when we've downloaded
         # all referenced blobs to avoid dangling references in the repository
@@ -942,12 +1005,12 @@ class CASCache(ArtifactCache):
         finished = False
         remaining = digest.size_bytes
         while not finished:
-            chunk_size = min(remaining, 64 * 1024)
+            chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
            remaining -= chunk_size
 
            request = bytestream_pb2.WriteRequest()
            request.write_offset = offset
-            # max. 64 kB chunks
+            # max. _MAX_PAYLOAD_BYTES chunks
            request.data = instream.read(chunk_size)
            request.resource_name = resname
            request.finish_write = remaining <= 0
1035 | 1098 |
|
1036 | 1099 |
self.bytestream = bytestream_pb2_grpc.ByteStreamStub(self.channel)
|
1037 | 1100 |
self.cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(self.channel)
|
1101 |
+ self.capabilities = remote_execution_pb2_grpc.CapabilitiesStub(self.channel)
|
|
1038 | 1102 |
self.ref_storage = buildstream_pb2_grpc.ReferenceStorageStub(self.channel)
|
1039 | 1103 |
|
1104 |
+ self.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
|
|
1105 |
+ try:
|
|
1106 |
+ request = remote_execution_pb2.GetCapabilitiesRequest()
|
|
1107 |
+ response = self.capabilities.GetCapabilities(request)
|
|
1108 |
+ server_max_batch_total_size_bytes = response.cache_capabilities.max_batch_total_size_bytes
|
|
1109 |
+ if 0 < server_max_batch_total_size_bytes < self.max_batch_total_size_bytes:
|
|
1110 |
+ self.max_batch_total_size_bytes = server_max_batch_total_size_bytes
|
|
1111 |
+ except grpc.RpcError as e:
|
|
1112 |
+ # Simply use the defaults for servers that don't implement GetCapabilities()
|
|
1113 |
+ if e.code() != grpc.StatusCode.UNIMPLEMENTED:
|
|
1114 |
+ raise
|
|
1115 |
+ |
|
1116 |
+ # Check whether the server supports BatchReadBlobs()
|
|
1117 |
+ self.batch_read_supported = False
|
|
1118 |
+ try:
|
|
1119 |
+ request = remote_execution_pb2.BatchReadBlobsRequest()
|
|
1120 |
+ response = self.cas.BatchReadBlobs(request)
|
|
1121 |
+ self.batch_read_supported = True
|
|
1122 |
+ except grpc.RpcError as e:
|
|
1123 |
+ if e.code() != grpc.StatusCode.UNIMPLEMENTED:
|
|
1124 |
+ raise
|
|
1125 |
+ |
|
1040 | 1126 |
self._initialized = True
|
1041 | 1127 |
|
1042 | 1128 |
|
1129 |
+# Represents a batch of blobs queued for fetching.
|
|
1130 |
+#
|
|
1131 |
+class _CASBatchRead():
|
|
1132 |
+ def __init__(self, remote):
|
|
1133 |
+ self._remote = remote
|
|
1134 |
+ self._max_total_size_bytes = remote.max_batch_total_size_bytes
|
|
1135 |
+ self._request = remote_execution_pb2.BatchReadBlobsRequest()
|
|
1136 |
+ self._size = 0
|
|
1137 |
+ self._sent = False
|
|
1138 |
+ |
|
1139 |
+ def add(self, digest):
|
|
1140 |
+ assert not self._sent
|
|
1141 |
+ |
|
1142 |
+ new_batch_size = self._size + digest.size_bytes
|
|
1143 |
+ if new_batch_size > self._max_total_size_bytes:
|
|
1144 |
+ # Not enough space left in current batch
|
|
1145 |
+ return False
|
|
1146 |
+ |
|
1147 |
+ request_digest = self._request.digests.add()
|
|
1148 |
+ request_digest.hash = digest.hash
|
|
1149 |
+ request_digest.size_bytes = digest.size_bytes
|
|
1150 |
+ self._size = new_batch_size
|
|
1151 |
+ return True
|
|
1152 |
+ |
|
1153 |
+ def send(self):
|
|
1154 |
+ assert not self._sent
|
|
1155 |
+ self._sent = True
|
|
1156 |
+ |
|
1157 |
+ if len(self._request.digests) == 0:
|
|
1158 |
+ return
|
|
1159 |
+ |
|
1160 |
+ batch_response = self._remote.cas.BatchReadBlobs(self._request)
|
|
1161 |
+ |
|
1162 |
+ for response in batch_response.responses:
|
|
1163 |
+ if response.status.code != grpc.StatusCode.OK.value[0]:
|
|
1164 |
+ raise ArtifactError("Failed to download blob {}: {}".format(
|
|
1165 |
+ response.digest.hash, response.status.code))
|
|
1166 |
+ if response.digest.size_bytes != len(response.data):
|
|
1167 |
+ raise ArtifactError("Failed to download blob {}: expected {} bytes, received {} bytes".format(
|
|
1168 |
+ response.digest.hash, response.digest.size_bytes, len(response.data)))
|
|
1169 |
+ |
|
1170 |
+ yield (response.digest, response.data)
|
|
1171 |
+ |
|
1172 |
+ |
|
1043 | 1173 |
def _grouper(iterable, n):
|
1044 | 1174 |
while True:
|
1045 | 1175 |
try:
|
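As a hedged sketch (not code from this series), the helpers above use _CASBatchRead in a fill-then-flush loop: digests are added until the next one would exceed the remote's max_batch_total_size_bytes, a single BatchReadBlobs RPC is issued, and a fresh batch is started. wanted_digests and store_blob() below are assumptions used only to keep the sketch self-contained.

    # Sketch of the fill-then-flush pattern implemented by the helpers above.
    batch = _CASBatchRead(remote)
    for digest in wanted_digests:          # assumed: Digest messages still missing locally
        if not batch.add(digest):          # adding it would exceed max_batch_total_size_bytes
            for d, data in batch.send():   # one BatchReadBlobs RPC for the whole batch
                store_blob(d, data)        # assumed helper: write the blob into the local CAS
            batch = _CASBatchRead(remote)
            batch.add(digest)              # the rejected digest starts the new batch
    for d, data in batch.send():           # flush the final, partially filled batch
        store_blob(d, data)

Blobs larger than the batch budget never enter a batch at all; _fetch_directory_node() falls back to an individual _ensure_blob() download for those.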
--- a/buildstream/_artifactcache/casserver.py
+++ b/buildstream/_artifactcache/casserver.py
@@ -38,8 +38,9 @@ from .._context import Context
 from .cascache import CASCache
 
 
-# The default limit for gRPC messages is 4 MiB
-_MAX_BATCH_TOTAL_SIZE_BYTES = 4 * 1024 * 1024
+# The default limit for gRPC messages is 4 MiB.
+# Limit payload to 1 MiB to leave sufficient headroom for metadata.
+_MAX_PAYLOAD_BYTES = 1024 * 1024
 
 
 # Trying to push an artifact that is too large
@@ -158,7 +159,7 @@ class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
 
                 remaining = client_digest.size_bytes - request.read_offset
                 while remaining > 0:
-                    chunk_size = min(remaining, 64 * 1024)
+                    chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
                     remaining -= chunk_size
 
                     response = bytestream_pb2.ReadResponse()
@@ -242,7 +243,7 @@ class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddres
 
         for digest in request.digests:
             batch_size += digest.size_bytes
-            if batch_size > _MAX_BATCH_TOTAL_SIZE_BYTES:
+            if batch_size > _MAX_PAYLOAD_BYTES:
                 context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
                 return response
 
@@ -269,7 +270,7 @@ class _CapabilitiesServicer(remote_execution_pb2_grpc.CapabilitiesServicer):
         cache_capabilities = response.cache_capabilities
         cache_capabilities.digest_function.append(remote_execution_pb2.SHA256)
         cache_capabilities.action_cache_update_capabilities.update_enabled = False
-        cache_capabilities.max_batch_total_size_bytes = _MAX_BATCH_TOTAL_SIZE_BYTES
+        cache_capabilities.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
         cache_capabilities.symlink_absolute_path_strategy = remote_execution_pb2.CacheCapabilities.ALLOWED
 
         response.deprecated_api_version.major = 2
--- a/buildstream/_frontend/cli.py
+++ b/buildstream/_frontend/cli.py
@@ -630,7 +630,7 @@ def shell(app, element, sysroot, mount, isolate, build_, command):
 @click.option('--force', '-f', default=False, is_flag=True,
               help="Allow files to be overwritten")
 @click.option('--deps', '-d', default='run',
-              type=click.Choice(['run', 'none']),
+              type=click.Choice(['run', 'build', 'none']),
               help='The dependencies to checkout (default: run)')
 @click.option('--integrate/--no-integrate', default=True, is_flag=True,
               help="Whether to run integration commands")
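For reference, the new 'build' choice is driven the same way as the existing ones; a sketch of an invocation through the test harness (mirroring the call added to tests/frontend/buildcheckout.py below; cli, project and checkout are assumed to be the usual test fixtures):

    # Sketch only; the element name and fixtures are those used in the test below.
    result = cli.run(project=project,
                     args=['checkout', 'checkout-deps.bst', '--deps', 'build', checkout])
    result.assert_success()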
--- a/buildstream/element.py
+++ b/buildstream/element.py
@@ -423,7 +423,7 @@ class Element(Plugin):
                                             visited=visited, recursed=True)
 
         # Yeild self only at the end, after anything needed has been traversed
-        if should_yield and (recurse or recursed) and (scope == Scope.ALL or scope == Scope.RUN):
+        if should_yield and (recurse or recursed) and scope != Scope.BUILD:
             yield self
 
     def search(self, scope, name):
@@ -671,37 +671,37 @@ class Element(Plugin):
         if self.__can_build_incrementally() and workspace.last_successful:
             old_dep_keys = self.__get_artifact_metadata_dependencies(workspace.last_successful)
 
-        for dep in self.dependencies(scope):
+        for element in self.dependencies(scope):
             # If we are workspaced, and we therefore perform an
             # incremental build, we must ensure that we update the mtimes
             # of any files created by our dependencies since the last
             # successful build.
             to_update = None
             if workspace and old_dep_keys:
-                dep.__assert_cached()
+                element.__assert_cached()
 
-                if dep.name in old_dep_keys:
-                    key_new = dep._get_cache_key()
-                    key_old = old_dep_keys[dep.name]
+                if element.name in old_dep_keys:
+                    key_new = element._get_cache_key()
+                    key_old = old_dep_keys[element.name]
 
                     # We only need to worry about modified and added
                     # files, since removed files will be picked up by
                     # build systems anyway.
-                    to_update, _, added = self.__artifacts.diff(dep, key_old, key_new, subdir='files')
-                    workspace.add_running_files(dep.name, to_update + added)
-                    to_update.extend(workspace.running_files[dep.name])
+                    to_update, _, added = self.__artifacts.diff(element, key_old, key_new, subdir='files')
+                    workspace.add_running_files(element.name, to_update + added)
+                    to_update.extend(workspace.running_files[element.name])
 
                     # In case we are running `bst shell`, this happens in the
                     # main process and we need to update the workspace config
                     if utils._is_main_process():
                         self._get_context().get_workspaces().save_config()
 
-            result = dep.stage_artifact(sandbox,
-                                        path=path,
-                                        include=include,
-                                        exclude=exclude,
-                                        orphans=orphans,
-                                        update_mtimes=to_update)
+            result = element.stage_artifact(sandbox,
+                                            path=path,
+                                            include=include,
+                                            exclude=exclude,
+                                            orphans=orphans,
+                                            update_mtimes=to_update)
             if result.overwritten:
                 for overwrite in result.overwritten:
                     # Completely new overwrite
@@ -710,13 +710,13 @@ class Element(Plugin):
                         # written the element before
                         for elm, contents in files_written.items():
                             if overwrite in contents:
-                                overlaps[overwrite] = [elm, dep.name]
+                                overlaps[overwrite] = [elm, element.name]
                     else:
-                        overlaps[overwrite].append(dep.name)
-            files_written[dep.name] = result.files_written
+                        overlaps[overwrite].append(element.name)
+            files_written[element.name] = result.files_written
 
             if result.ignored:
-                ignored[dep.name] = result.ignored
+                ignored[element.name] = result.ignored
 
         if overlaps:
             overlap_warning = False
@@ -1318,17 +1318,24 @@ class Element(Plugin):
                 if scope == Scope.BUILD:
                     self.stage(sandbox)
                 elif scope == Scope.RUN:
+
+                    if deps == 'build':
+                        dependency_scope = Scope.BUILD
+                    elif deps == 'run':
+                        dependency_scope = Scope.RUN
+                    else:
+                        dependency_scope = Scope.NONE
+
                     # Stage deps in the sandbox root
-                    if deps == 'run':
-                        with self.timed_activity("Staging dependencies", silent_nested=True):
-                            self.stage_dependency_artifacts(sandbox, scope)
+                    with self.timed_activity("Staging dependencies", silent_nested=True):
+                        self.stage_dependency_artifacts(sandbox, dependency_scope)
 
-                        # Run any integration commands provided by the dependencies
-                        # once they are all staged and ready
-                        if integrate:
-                            with self.timed_activity("Integrating sandbox"):
-                                for dep in self.dependencies(scope):
-                                    dep.integrate(sandbox)
+                    # Run any integration commands provided by the dependencies
+                    # once they are all staged and ready
+                    if integrate:
+                        with self.timed_activity("Integrating sandbox"):
+                            for dep in self.dependencies(dependency_scope):
+                                dep.integrate(sandbox)
 
             yield sandbox
 
1532 | 1539 |
with _signals.terminator(cleanup_rootdir), \
|
1533 | 1540 |
self.__sandbox(rootdir, output_file, output_file, self.__sandbox_config) as sandbox: # nopep8
|
1534 | 1541 |
|
1535 |
- sandbox_vroot = sandbox.get_virtual_directory()
|
|
1536 |
- |
|
1537 | 1542 |
# By default, the dynamic public data is the same as the static public data.
|
1538 | 1543 |
# The plugin's assemble() method may modify this, though.
|
1539 | 1544 |
self.__dynamic_public = _yaml.node_copy(self.__public)
|
... | ... | @@ -1581,7 +1586,6 @@ class Element(Plugin): |
1581 | 1586 |
finally:
|
1582 | 1587 |
if collect is not None:
|
1583 | 1588 |
try:
|
1584 |
- # Sandbox will probably have replaced its virtual directory, so get it again
|
|
1585 | 1589 |
sandbox_vroot = sandbox.get_virtual_directory()
|
1586 | 1590 |
collectvdir = sandbox_vroot.descend(collect.lstrip(os.sep).split(os.sep))
|
1587 | 1591 |
except VirtualDirectoryError:
|
... | ... | @@ -1606,6 +1610,7 @@ class Element(Plugin): |
1606 | 1610 |
collectvdir.export_files(filesdir, can_link=True)
|
1607 | 1611 |
|
1608 | 1612 |
try:
|
1613 |
+ sandbox_vroot = sandbox.get_virtual_directory()
|
|
1609 | 1614 |
sandbox_build_dir = sandbox_vroot.descend(
|
1610 | 1615 |
self.get_variable('build-root').lstrip(os.sep).split(os.sep))
|
1611 | 1616 |
# Hard link files from build-root dir to buildtreedir directory
|
--- a/buildstream/element_enums.py
+++ b/buildstream/element_enums.py
@@ -59,3 +59,7 @@ class Scope(Enum):
     """All elements required for running the element. Including the element
     itself.
     """
+
+    NONE = 4
+    """Just the element itself, no dependencies.
+    """
--- a/buildstream/sandbox/sandbox.py
+++ b/buildstream/sandbox/sandbox.py
@@ -110,6 +110,10 @@ class Sandbox():
             os.makedirs(directory_, exist_ok=True)
         self._vdir = None
 
+        # This is set if anyone requests access to the underlying
+        # directory via get_directory.
+        self._never_cache_vdirs = False
+
     def get_directory(self):
         """Fetches the sandbox root directory
 
@@ -122,24 +126,28 @@ class Sandbox():
 
         """
         if self.__allow_real_directory:
+            self._never_cache_vdirs = True
            return self._root
         else:
            raise BstError("You can't use get_directory")
 
     def get_virtual_directory(self):
-        """Fetches the sandbox root directory
+        """Fetches the sandbox root directory as a virtual Directory.
 
         The root directory is where artifacts for the base
-        runtime environment should be staged. Only works if
-        BST_VIRTUAL_DIRECTORY is not set.
+        runtime environment should be staged.
+
+        Use caution if you use get_directory and
+        get_virtual_directory. If you alter the contents of the
+        directory returned by get_directory, all objects returned by
+        get_virtual_directory or derived from them are invalid and you
+        must call get_virtual_directory again to get a new copy.
 
         Returns:
-           (str): The sandbox root directory
+           (Directory): The sandbox root directory
 
         """
-        if not self._vdir:
-            # BST_CAS_DIRECTORIES is a deliberately hidden environment variable which
-            # can be used to switch on CAS-based directories for testing.
+        if self._vdir is None or self._never_cache_vdirs:
             if 'BST_CAS_DIRECTORIES' in os.environ:
                 self._vdir = CasBasedDirectory(self.__context, ref=None)
             else:
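A hedged usage sketch of the contract spelled out in the updated docstring (only the two Sandbox methods are real API here; the file written is purely illustrative):

    vdir = sandbox.get_virtual_directory()     # may be cached by the Sandbox

    rootdir = sandbox.get_directory()          # from now on vdirs are never cached
    with open(os.path.join(rootdir, 'marker'), 'w') as f:   # mutate the real directory
        f.write('data')

    # 'vdir' and anything derived from it must now be considered stale;
    # ask for a fresh view before using it again.
    vdir = sandbox.get_virtual_directory()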
--- a/tests/frontend/buildcheckout.py
+++ b/tests/frontend/buildcheckout.py
@@ -61,13 +61,14 @@ def test_build_checkout(datafiles, cli, strict, hardlinks):
 
 
 @pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.parametrize("deps", [("run"), ("none")])
+@pytest.mark.parametrize("deps", [("run"), ("none"), ("build")])
 def test_build_checkout_deps(datafiles, cli, deps):
     project = os.path.join(datafiles.dirname, datafiles.basename)
     checkout = os.path.join(cli.directory, 'checkout')
+    element_name = "checkout-deps.bst"
 
     # First build it
-    result = cli.run(project=project, args=['build', 'target.bst'])
+    result = cli.run(project=project, args=['build', element_name])
     result.assert_success()
 
     # Assert that after a successful build, the builddir is empty
@@ -76,20 +77,25 @@ def test_build_checkout_deps(datafiles, cli, deps):
     assert not os.listdir(builddir)
 
     # Now check it out
-    result = cli.run(project=project, args=['checkout', 'target.bst', '--deps', deps, checkout])
+    result = cli.run(project=project, args=['checkout', element_name, '--deps', deps, checkout])
     result.assert_success()
 
-    # Check that the executable hello file is found in the checkout
-    filename = os.path.join(checkout, 'usr', 'bin', 'hello')
+    # Verify output of this element
+    filename = os.path.join(checkout, 'etc', 'buildstream', 'config')
+    if deps == "build":
+        assert not os.path.exists(filename)
+    else:
+        assert os.path.exists(filename)
 
-    if deps == "run":
+    # Verify output of this element's build dependencies
+    filename = os.path.join(checkout, 'usr', 'include', 'pony.h')
+    if deps == "build":
         assert os.path.exists(filename)
     else:
         assert not os.path.exists(filename)
 
-    # Check that the executable hello file is found in the checkout
-    filename = os.path.join(checkout, 'usr', 'include', 'pony.h')
-
+    # Verify output of this element's runtime dependencies
+    filename = os.path.join(checkout, 'usr', 'bin', 'hello')
     if deps == "run":
         assert os.path.exists(filename)
     else:
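Summarizing the assertions above: which staged files the test expects to find in the checkout for each --deps value.

    # deps value -> files that must be present after `bst checkout --deps <deps>`
    expected = {
        #          element's own output        build-dep output      runtime-dep output
        #          etc/buildstream/config      usr/include/pony.h    usr/bin/hello
        'run':   {'config': True,  'pony.h': False, 'hello': True},
        'build': {'config': False, 'pony.h': True,  'hello': False},
        'none':  {'config': True,  'pony.h': False, 'hello': False},
    }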
--- /dev/null
+++ b/tests/frontend/project/elements/checkout-deps.bst
@@ -0,0 +1,10 @@
+kind: import
+description: It is important for this element to have both build and runtime dependencies
+sources:
+- kind: local
+  path: files/etc-files
+depends:
+- filename: import-dev.bst
+  type: build
+- filename: import-bin.bst
+  type: runtime
--- /dev/null
+++ b/tests/frontend/project/files/etc-files/etc/buildstream/config
@@ -0,0 +1 @@
+config