Thomas Coldrick pushed to branch coldtom/strip-rules at BuildStream / buildstream
Commits:
-
5cdfac81
by Mathieu Bridon at 2018-09-28T09:20:20Z
-
5d3f039f
by Tom Pollard at 2018-09-28T10:34:13Z
-
14e1a3b3
by Jürg Billeter at 2018-10-01T08:18:05Z
-
232662f1
by Jürg Billeter at 2018-10-01T08:53:06Z
-
f447aedd
by Tiago Gomes at 2018-10-01T10:33:11Z
-
682dddce
by Tiago Gomes at 2018-10-01T10:35:12Z
-
fafa8136
by Tiago Gomes at 2018-10-01T10:59:54Z
-
26e1a3c7
by Jürg Billeter at 2018-10-01T14:58:06Z
-
f47895c0
by Jürg Billeter at 2018-10-01T14:58:06Z
-
cf00c0a1
by Jürg Billeter at 2018-10-01T15:32:30Z
-
5f4ae90b
by Jürg Billeter at 2018-10-02T06:34:02Z
-
0458bc4e
by Jürg Billeter at 2018-10-02T07:08:35Z
-
d5b396e0
by Phillip Smyth at 2018-10-02T16:40:18Z
-
dae842fd
by Phillip Smyth at 2018-10-02T17:14:09Z
-
8dc16d3f
by Jürg Billeter at 2018-10-03T05:08:21Z
-
66446fc3
by Jürg Billeter at 2018-10-03T05:36:38Z
-
29c19bea
by Tristan Van Berkom at 2018-10-03T07:33:48Z
-
b645881c
by Tristan Van Berkom at 2018-10-03T07:33:48Z
-
c9437616
by Tristan Van Berkom at 2018-10-03T08:07:15Z
-
11320fe2
by Tristan Van Berkom at 2018-10-03T09:33:39Z
-
91271964
by Tristan Van Berkom at 2018-10-03T09:59:40Z
-
3bf895d2
by Jonathan Maw at 2018-10-03T11:48:48Z
-
e4969807
by Jonathan Maw at 2018-10-03T12:48:07Z
-
a0814399
by Tristan Van Berkom at 2018-10-03T13:05:52Z
-
0a1f8e3c
by Tristan Van Berkom at 2018-10-03T13:42:20Z
-
11161f99
by Tristan Van Berkom at 2018-10-03T13:44:02Z
-
3e797bb9
by Tristan Van Berkom at 2018-10-03T13:44:02Z
-
d9020e43
by Tristan Van Berkom at 2018-10-03T13:44:02Z
-
3e5ff5a9
by Tristan Van Berkom at 2018-10-03T14:09:51Z
-
e8179c34
by Jim MacArthur at 2018-10-03T16:06:59Z
-
3cf38c8e
by Jim MacArthur at 2018-10-03T16:44:02Z
-
59c92bda
by Daniel Silverstone at 2018-10-04T08:12:20Z
-
b9ddcd0e
by Daniel Silverstone at 2018-10-04T08:12:20Z
-
df0d5a8b
by Daniel Silverstone at 2018-10-04T09:04:21Z
-
b8421a9c
by Daniel Silverstone at 2018-10-04T09:04:21Z
-
c74bfbe5
by Daniel Silverstone at 2018-10-04T09:04:21Z
-
c46a7e87
by Daniel Silverstone at 2018-10-04T09:13:04Z
-
fd6a9573
by Jürg Billeter at 2018-10-04T09:45:58Z
-
e7beb292
by Thomas Coldrick at 2018-10-04T10:53:22Z
-
c31cb89a
by Thomas Coldrick at 2018-10-04T10:53:22Z
23 changed files:
- .gitlab-ci.yml
- buildstream/_artifactcache/cascache.py
- buildstream/_artifactcache/casserver.py
- buildstream/_platform/darwin.py
- buildstream/_platform/linux.py
- buildstream/_platform/platform.py
- buildstream/_scheduler/jobs/job.py
- buildstream/_scheduler/queues/queue.py
- buildstream/_scheduler/scheduler.py
- buildstream/_site.py
- buildstream/data/projectconfig.yaml
- buildstream/data/userconfig.yaml
- buildstream/element.py
- buildstream/plugins/sources/git.py
- buildstream/sandbox/_sandboxdummy.py
- buildstream/sandbox/_sandboxremote.py
- buildstream/source.py
- buildstream/utils.py
- setup.py
- tests/cachekey/project/elements/build1.expected
- tests/cachekey/project/elements/build2.expected
- tests/cachekey/project/target.expected
- tests/frontend/mirror.py
Changes:
... | ... | @@ -145,7 +145,8 @@ docs: |
145 | 145 |
stage: test
|
146 | 146 |
script:
|
147 | 147 |
- export BST_SOURCE_CACHE="$(pwd)/cache/integration-cache/sources"
|
148 |
- - pip3 install sphinx
|
|
148 |
+ # Currently sphinx_rtd_theme does not support Sphinx >1.8, this breaks search functionality
|
|
149 |
+ - pip3 install sphinx==1.7.9
|
|
149 | 150 |
- pip3 install sphinx-click
|
150 | 151 |
- pip3 install sphinx_rtd_theme
|
151 | 152 |
- cd dist && ./unpack.sh && cd buildstream
|
... | ... | @@ -161,14 +162,14 @@ docs: |
161 | 162 |
.overnight-tests: &overnight-tests-template
|
162 | 163 |
stage: test
|
163 | 164 |
variables:
|
164 |
- bst_ext_url: git+https://gitlab.com/BuildStream/bst-external.git
|
|
165 |
- bst_ext_ref: 1d6ab71151b93c8cbc0a91a36ffe9270f3b835f1 # 0.5.1
|
|
166 |
- fd_sdk_ref: 88d7c22c2281b987faa02edd57df80d430eecf1f # 18.08.12
|
|
165 |
+ BST_EXT_URL: git+https://gitlab.com/BuildStream/bst-external.git
|
|
166 |
+ BST_EXT_REF: 1d6ab71151b93c8cbc0a91a36ffe9270f3b835f1 # 0.5.1
|
|
167 |
+ FD_SDK_REF: 88d7c22c2281b987faa02edd57df80d430eecf1f # 18.08.11-35-g88d7c22c
|
|
167 | 168 |
before_script:
|
168 | 169 |
- (cd dist && ./unpack.sh && cd buildstream && pip3 install .)
|
169 |
- - pip3 install --user -e ${bst_ext_url}@${bst_ext_ref}#egg=bst_ext
|
|
170 |
+ - pip3 install --user -e ${BST_EXT_URL}@${BST_EXT_REF}#egg=bst_ext
|
|
170 | 171 |
- git clone https://gitlab.com/freedesktop-sdk/freedesktop-sdk.git
|
171 |
- - git -C freedesktop-sdk checkout ${fd_sdk_ref}
|
|
172 |
+ - git -C freedesktop-sdk checkout ${FD_SDK_REF}
|
|
172 | 173 |
only:
|
173 | 174 |
- schedules
|
174 | 175 |
|
... | ... | @@ -506,7 +506,7 @@ class CASCache(ArtifactCache): |
506 | 506 |
def set_ref(self, ref, tree):
|
507 | 507 |
refpath = self._refpath(ref)
|
508 | 508 |
os.makedirs(os.path.dirname(refpath), exist_ok=True)
|
509 |
- with utils.save_file_atomic(refpath, 'wb') as f:
|
|
509 |
+ with utils.save_file_atomic(refpath, 'wb', tempdir=self.tmpdir) as f:
|
|
510 | 510 |
f.write(tree.SerializeToString())
|
511 | 511 |
|
512 | 512 |
# resolve_ref():
|
... | ... | @@ -1048,10 +1048,29 @@ class CASCache(ArtifactCache): |
1048 | 1048 |
missing_blobs[d.hash] = d
|
1049 | 1049 |
|
1050 | 1050 |
# Upload any blobs missing on the server
|
1051 |
- for blob_digest in missing_blobs.values():
|
|
1052 |
- with open(self.objpath(blob_digest), 'rb') as f:
|
|
1053 |
- assert os.fstat(f.fileno()).st_size == blob_digest.size_bytes
|
|
1054 |
- self._send_blob(remote, blob_digest, f, u_uid=u_uid)
|
|
1051 |
+ self._send_blobs(remote, missing_blobs.values(), u_uid)
|
|
1052 |
+ |
|
1053 |
+ def _send_blobs(self, remote, digests, u_uid=uuid.uuid4()):
|
|
1054 |
+ batch = _CASBatchUpdate(remote)
|
|
1055 |
+ |
|
1056 |
+ for digest in digests:
|
|
1057 |
+ with open(self.objpath(digest), 'rb') as f:
|
|
1058 |
+ assert os.fstat(f.fileno()).st_size == digest.size_bytes
|
|
1059 |
+ |
|
1060 |
+ if (digest.size_bytes >= remote.max_batch_total_size_bytes or
|
|
1061 |
+ not remote.batch_update_supported):
|
|
1062 |
+ # Too large for batch request, upload in independent request.
|
|
1063 |
+ self._send_blob(remote, digest, f, u_uid=u_uid)
|
|
1064 |
+ else:
|
|
1065 |
+ if not batch.add(digest, f):
|
|
1066 |
+ # Not enough space left in batch request.
|
|
1067 |
+ # Complete pending batch first.
|
|
1068 |
+ batch.send()
|
|
1069 |
+ batch = _CASBatchUpdate(remote)
|
|
1070 |
+ batch.add(digest, f)
|
|
1071 |
+ |
|
1072 |
+ # Send final batch
|
|
1073 |
+ batch.send()
|
|
1055 | 1074 |
|
1056 | 1075 |
|
1057 | 1076 |
# Represents a single remote CAS cache.
|
... | ... | @@ -1126,6 +1145,17 @@ class _CASRemote(): |
1126 | 1145 |
if e.code() != grpc.StatusCode.UNIMPLEMENTED:
|
1127 | 1146 |
raise
|
1128 | 1147 |
|
1148 |
+ # Check whether the server supports BatchUpdateBlobs()
|
|
1149 |
+ self.batch_update_supported = False
|
|
1150 |
+ try:
|
|
1151 |
+ request = remote_execution_pb2.BatchUpdateBlobsRequest()
|
|
1152 |
+ response = self.cas.BatchUpdateBlobs(request)
|
|
1153 |
+ self.batch_update_supported = True
|
|
1154 |
+ except grpc.RpcError as e:
|
|
1155 |
+ if (e.code() != grpc.StatusCode.UNIMPLEMENTED and
|
|
1156 |
+ e.code() != grpc.StatusCode.PERMISSION_DENIED):
|
|
1157 |
+ raise
|
|
1158 |
+ |
|
1129 | 1159 |
self._initialized = True
|
1130 | 1160 |
|
1131 | 1161 |
|
... | ... | @@ -1173,6 +1203,46 @@ class _CASBatchRead(): |
1173 | 1203 |
yield (response.digest, response.data)
|
1174 | 1204 |
|
1175 | 1205 |
|
1206 |
+# Represents a batch of blobs queued for upload.
|
|
1207 |
+#
|
|
1208 |
+class _CASBatchUpdate():
|
|
1209 |
+ def __init__(self, remote):
|
|
1210 |
+ self._remote = remote
|
|
1211 |
+ self._max_total_size_bytes = remote.max_batch_total_size_bytes
|
|
1212 |
+ self._request = remote_execution_pb2.BatchUpdateBlobsRequest()
|
|
1213 |
+ self._size = 0
|
|
1214 |
+ self._sent = False
|
|
1215 |
+ |
|
1216 |
+ def add(self, digest, stream):
|
|
1217 |
+ assert not self._sent
|
|
1218 |
+ |
|
1219 |
+ new_batch_size = self._size + digest.size_bytes
|
|
1220 |
+ if new_batch_size > self._max_total_size_bytes:
|
|
1221 |
+ # Not enough space left in current batch
|
|
1222 |
+ return False
|
|
1223 |
+ |
|
1224 |
+ blob_request = self._request.requests.add()
|
|
1225 |
+ blob_request.digest.hash = digest.hash
|
|
1226 |
+ blob_request.digest.size_bytes = digest.size_bytes
|
|
1227 |
+ blob_request.data = stream.read(digest.size_bytes)
|
|
1228 |
+ self._size = new_batch_size
|
|
1229 |
+ return True
|
|
1230 |
+ |
|
1231 |
+ def send(self):
|
|
1232 |
+ assert not self._sent
|
|
1233 |
+ self._sent = True
|
|
1234 |
+ |
|
1235 |
+ if len(self._request.requests) == 0:
|
|
1236 |
+ return
|
|
1237 |
+ |
|
1238 |
+ batch_response = self._remote.cas.BatchUpdateBlobs(self._request)
|
|
1239 |
+ |
|
1240 |
+ for response in batch_response.responses:
|
|
1241 |
+ if response.status.code != grpc.StatusCode.OK.value[0]:
|
|
1242 |
+ raise ArtifactError("Failed to upload blob {}: {}".format(
|
|
1243 |
+ response.digest.hash, response.status.code))
|
|
1244 |
+ |
|
1245 |
+ |
|
1176 | 1246 |
def _grouper(iterable, n):
|
1177 | 1247 |
while True:
|
1178 | 1248 |
try:
|
... | ... | @@ -68,7 +68,7 @@ def create_server(repo, *, enable_push): |
68 | 68 |
_ByteStreamServicer(artifactcache, enable_push=enable_push), server)
|
69 | 69 |
|
70 | 70 |
remote_execution_pb2_grpc.add_ContentAddressableStorageServicer_to_server(
|
71 |
- _ContentAddressableStorageServicer(artifactcache), server)
|
|
71 |
+ _ContentAddressableStorageServicer(artifactcache, enable_push=enable_push), server)
|
|
72 | 72 |
|
73 | 73 |
remote_execution_pb2_grpc.add_CapabilitiesServicer_to_server(
|
74 | 74 |
_CapabilitiesServicer(), server)
|
... | ... | @@ -222,9 +222,10 @@ class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer): |
222 | 222 |
|
223 | 223 |
|
224 | 224 |
class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddressableStorageServicer):
|
225 |
- def __init__(self, cas):
|
|
225 |
+ def __init__(self, cas, *, enable_push):
|
|
226 | 226 |
super().__init__()
|
227 | 227 |
self.cas = cas
|
228 |
+ self.enable_push = enable_push
|
|
228 | 229 |
|
229 | 230 |
def FindMissingBlobs(self, request, context):
|
230 | 231 |
response = remote_execution_pb2.FindMissingBlobsResponse()
|
... | ... | @@ -260,6 +261,46 @@ class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddres |
260 | 261 |
|
261 | 262 |
return response
|
262 | 263 |
|
264 |
+ def BatchUpdateBlobs(self, request, context):
|
|
265 |
+ response = remote_execution_pb2.BatchUpdateBlobsResponse()
|
|
266 |
+ |
|
267 |
+ if not self.enable_push:
|
|
268 |
+ context.set_code(grpc.StatusCode.PERMISSION_DENIED)
|
|
269 |
+ return response
|
|
270 |
+ |
|
271 |
+ batch_size = 0
|
|
272 |
+ |
|
273 |
+ for blob_request in request.requests:
|
|
274 |
+ digest = blob_request.digest
|
|
275 |
+ |
|
276 |
+ batch_size += digest.size_bytes
|
|
277 |
+ if batch_size > _MAX_PAYLOAD_BYTES:
|
|
278 |
+ context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
|
|
279 |
+ return response
|
|
280 |
+ |
|
281 |
+ blob_response = response.responses.add()
|
|
282 |
+ blob_response.digest.hash = digest.hash
|
|
283 |
+ blob_response.digest.size_bytes = digest.size_bytes
|
|
284 |
+ |
|
285 |
+ if len(blob_request.data) != digest.size_bytes:
|
|
286 |
+ blob_response.status.code = grpc.StatusCode.FAILED_PRECONDITION
|
|
287 |
+ continue
|
|
288 |
+ |
|
289 |
+ try:
|
|
290 |
+ _clean_up_cache(self.cas, digest.size_bytes)
|
|
291 |
+ |
|
292 |
+ with tempfile.NamedTemporaryFile(dir=self.cas.tmpdir) as out:
|
|
293 |
+ out.write(blob_request.data)
|
|
294 |
+ out.flush()
|
|
295 |
+ server_digest = self.cas.add_object(path=out.name)
|
|
296 |
+ if server_digest.hash != digest.hash:
|
|
297 |
+ blob_response.status.code = grpc.StatusCode.FAILED_PRECONDITION
|
|
298 |
+ |
|
299 |
+ except ArtifactTooLargeException:
|
|
300 |
+ blob_response.status.code = grpc.StatusCode.RESOURCE_EXHAUSTED
|
|
301 |
+ |
|
302 |
+ return response
|
|
303 |
+ |
|
263 | 304 |
|
264 | 305 |
class _CapabilitiesServicer(remote_execution_pb2_grpc.CapabilitiesServicer):
|
265 | 306 |
def GetCapabilities(self, request, context):
|
... | ... | @@ -34,6 +34,9 @@ class Darwin(Platform): |
34 | 34 |
super().__init__()
|
35 | 35 |
|
36 | 36 |
def create_sandbox(self, *args, **kwargs):
|
37 |
+ kwargs['dummy_reason'] = \
|
|
38 |
+ "OSXFUSE is not supported and there are no supported sandbox " + \
|
|
39 |
+ "technologies for OSX at this time"
|
|
37 | 40 |
return SandboxDummy(*args, **kwargs)
|
38 | 41 |
|
39 | 42 |
def check_sandbox_config(self, config):
|
... | ... | @@ -41,10 +44,11 @@ class Darwin(Platform): |
41 | 44 |
return True
|
42 | 45 |
|
43 | 46 |
def get_cpu_count(self, cap=None):
|
44 |
- if cap < os.cpu_count():
|
|
45 |
- return cap
|
|
47 |
+ cpu_count = os.cpu_count()
|
|
48 |
+ if cap is None:
|
|
49 |
+ return cpu_count
|
|
46 | 50 |
else:
|
47 |
- return os.cpu_count()
|
|
51 |
+ return min(cpu_count, cap)
|
|
48 | 52 |
|
49 | 53 |
def set_resource_limits(self, soft_limit=OPEN_MAX, hard_limit=None):
|
50 | 54 |
super().set_resource_limits(soft_limit)
|
... | ... | @@ -37,24 +37,30 @@ class Linux(Platform): |
37 | 37 |
self._uid = os.geteuid()
|
38 | 38 |
self._gid = os.getegid()
|
39 | 39 |
|
40 |
+ self._have_fuse = os.path.exists("/dev/fuse")
|
|
41 |
+ self._bwrap_exists = _site.check_bwrap_version(0, 0, 0)
|
|
42 |
+ self._have_good_bwrap = _site.check_bwrap_version(0, 1, 2)
|
|
43 |
+ |
|
44 |
+ self._local_sandbox_available = self._have_fuse and self._have_good_bwrap
|
|
45 |
+ |
|
40 | 46 |
self._die_with_parent_available = _site.check_bwrap_version(0, 1, 8)
|
41 | 47 |
|
42 |
- if self._local_sandbox_available():
|
|
48 |
+ if self._local_sandbox_available:
|
|
43 | 49 |
self._user_ns_available = self._check_user_ns_available()
|
44 | 50 |
else:
|
45 | 51 |
self._user_ns_available = False
|
46 | 52 |
|
47 | 53 |
def create_sandbox(self, *args, **kwargs):
|
48 |
- if not self._local_sandbox_available():
|
|
49 |
- return SandboxDummy(*args, **kwargs)
|
|
54 |
+ if not self._local_sandbox_available:
|
|
55 |
+ return self._create_dummy_sandbox(*args, **kwargs)
|
|
50 | 56 |
else:
|
51 |
- from ..sandbox._sandboxbwrap import SandboxBwrap
|
|
52 |
- # Inform the bubblewrap sandbox as to whether it can use user namespaces or not
|
|
53 |
- kwargs['user_ns_available'] = self._user_ns_available
|
|
54 |
- kwargs['die_with_parent_available'] = self._die_with_parent_available
|
|
55 |
- return SandboxBwrap(*args, **kwargs)
|
|
57 |
+ return self._create_bwrap_sandbox(*args, **kwargs)
|
|
56 | 58 |
|
57 | 59 |
def check_sandbox_config(self, config):
|
60 |
+ if not self._local_sandbox_available:
|
|
61 |
+ # Accept all sandbox configs as it's irrelevant with the dummy sandbox (no Sandbox.run).
|
|
62 |
+ return True
|
|
63 |
+ |
|
58 | 64 |
if self._user_ns_available:
|
59 | 65 |
# User namespace support allows arbitrary build UID/GID settings.
|
60 | 66 |
return True
|
... | ... | @@ -66,11 +72,26 @@ class Linux(Platform): |
66 | 72 |
################################################
|
67 | 73 |
# Private Methods #
|
68 | 74 |
################################################
|
69 |
- def _local_sandbox_available(self):
|
|
70 |
- try:
|
|
71 |
- return os.path.exists(utils.get_host_tool('bwrap')) and os.path.exists('/dev/fuse')
|
|
72 |
- except utils.ProgramNotFoundError:
|
|
73 |
- return False
|
|
75 |
+ |
|
76 |
+ def _create_dummy_sandbox(self, *args, **kwargs):
|
|
77 |
+ reasons = []
|
|
78 |
+ if not self._have_fuse:
|
|
79 |
+ reasons.append("FUSE is unavailable")
|
|
80 |
+ if not self._have_good_bwrap:
|
|
81 |
+ if self._bwrap_exists:
|
|
82 |
+ reasons.append("`bwrap` is too old (bst needs at least 0.1.2)")
|
|
83 |
+ else:
|
|
84 |
+ reasons.append("`bwrap` executable not found")
|
|
85 |
+ |
|
86 |
+ kwargs['dummy_reason'] = " and ".join(reasons)
|
|
87 |
+ return SandboxDummy(*args, **kwargs)
|
|
88 |
+ |
|
89 |
+ def _create_bwrap_sandbox(self, *args, **kwargs):
|
|
90 |
+ from ..sandbox._sandboxbwrap import SandboxBwrap
|
|
91 |
+ # Inform the bubblewrap sandbox as to whether it can use user namespaces or not
|
|
92 |
+ kwargs['user_ns_available'] = self._user_ns_available
|
|
93 |
+ kwargs['die_with_parent_available'] = self._die_with_parent_available
|
|
94 |
+ return SandboxBwrap(*args, **kwargs)
|
|
74 | 95 |
|
75 | 96 |
def _check_user_ns_available(self):
|
76 | 97 |
# Here, lets check if bwrap is able to create user namespaces,
|
... | ... | @@ -67,7 +67,11 @@ class Platform(): |
67 | 67 |
return cls._instance
|
68 | 68 |
|
69 | 69 |
def get_cpu_count(self, cap=None):
|
70 |
- return min(len(os.sched_getaffinity(0)), cap)
|
|
70 |
+ cpu_count = len(os.sched_getaffinity(0))
|
|
71 |
+ if cap is None:
|
|
72 |
+ return cpu_count
|
|
73 |
+ else:
|
|
74 |
+ return min(cpu_count, cap)
|
|
71 | 75 |
|
72 | 76 |
##################################################################
|
73 | 77 |
# Sandbox functions #
|
... | ... | @@ -119,6 +119,8 @@ class Job(): |
119 | 119 |
self._result = None # Return value of child action in the parent
|
120 | 120 |
self._tries = 0 # Try count, for retryable jobs
|
121 | 121 |
self._skipped_flag = False # Indicate whether the job was skipped.
|
122 |
+ self._terminated = False # Whether this job has been explicitly terminated
|
|
123 |
+ |
|
122 | 124 |
# If False, a retry will not be attempted regardless of whether _tries is less than _max_retries.
|
123 | 125 |
#
|
124 | 126 |
self._retry_flag = True
|
... | ... | @@ -190,6 +192,8 @@ class Job(): |
190 | 192 |
# Terminate the process using multiprocessing API pathway
|
191 | 193 |
self._process.terminate()
|
192 | 194 |
|
195 |
+ self._terminated = True
|
|
196 |
+ |
|
193 | 197 |
# terminate_wait()
|
194 | 198 |
#
|
195 | 199 |
# Wait for terminated jobs to complete
|
... | ... | @@ -273,18 +277,22 @@ class Job(): |
273 | 277 |
# running the integration commands).
|
274 | 278 |
#
|
275 | 279 |
# Args:
|
276 |
- # (int): The plugin identifier for this task
|
|
280 |
+ # task_id (int): The plugin identifier for this task
|
|
277 | 281 |
#
|
278 | 282 |
def set_task_id(self, task_id):
|
279 | 283 |
self._task_id = task_id
|
280 | 284 |
|
281 | 285 |
# skipped
|
282 | 286 |
#
|
287 |
+ # This will evaluate to True if the job was skipped
|
|
288 |
+ # during processing, or if it was forcefully terminated.
|
|
289 |
+ #
|
|
283 | 290 |
# Returns:
|
284 |
- # bool: True if the job was skipped while processing.
|
|
291 |
+ # (bool): Whether the job should appear as skipped
|
|
292 |
+ #
|
|
285 | 293 |
@property
|
286 | 294 |
def skipped(self):
|
287 |
- return self._skipped_flag
|
|
295 |
+ return self._skipped_flag or self._terminated
|
|
288 | 296 |
|
289 | 297 |
#######################################################
|
290 | 298 |
# Abstract Methods #
|
... | ... | @@ -326,16 +326,20 @@ class Queue(): |
326 | 326 |
detail=traceback.format_exc())
|
327 | 327 |
self.failed_elements.append(element)
|
328 | 328 |
else:
|
329 |
- |
|
330 |
- # No exception occured, handle the success/failure state in the normal way
|
|
331 | 329 |
#
|
330 |
+ # No exception occurred in post processing
|
|
331 |
+ #
|
|
332 |
+ |
|
333 |
+ # All jobs get placed on the done queue for later processing.
|
|
332 | 334 |
self._done_queue.append(job)
|
333 | 335 |
|
334 |
- if success:
|
|
335 |
- if not job.skipped:
|
|
336 |
- self.processed_elements.append(element)
|
|
337 |
- else:
|
|
338 |
- self.skipped_elements.append(element)
|
|
336 |
+ # A Job can be skipped whether or not it has failed,
|
|
337 |
+ # we want to only bookkeep them as processed or failed
|
|
338 |
+ # if they are not skipped.
|
|
339 |
+ if job.skipped:
|
|
340 |
+ self.skipped_elements.append(element)
|
|
341 |
+ elif success:
|
|
342 |
+ self.processed_elements.append(element)
|
|
339 | 343 |
else:
|
340 | 344 |
self.failed_elements.append(element)
|
341 | 345 |
|
... | ... | @@ -387,6 +387,15 @@ class Scheduler(): |
387 | 387 |
# A loop registered event callback for keyboard interrupts
|
388 | 388 |
#
|
389 | 389 |
def _interrupt_event(self):
|
390 |
+ |
|
391 |
+ # FIXME: This should not be needed, but for some reason we receive an
|
|
392 |
+ # additional SIGINT event when the user hits ^C a second time
|
|
393 |
+ # to inform us that they really intend to terminate; even though
|
|
394 |
+ # we have disconnected our handlers at this time.
|
|
395 |
+ #
|
|
396 |
+ if self.terminated:
|
|
397 |
+ return
|
|
398 |
+ |
|
390 | 399 |
# Leave this to the frontend to decide, if no
|
391 | 400 |
# interrupt callback was specified, then just terminate.
|
392 | 401 |
if self._interrupt_callback:
|
... | ... | @@ -78,18 +78,12 @@ def check_bwrap_version(major, minor, patch): |
78 | 78 |
if not bwrap_path:
|
79 | 79 |
return False
|
80 | 80 |
cmd = [bwrap_path, "--version"]
|
81 |
- version = str(subprocess.check_output(cmd).split()[1], "utf-8")
|
|
81 |
+ try:
|
|
82 |
+ version = str(subprocess.check_output(cmd).split()[1], "utf-8")
|
|
83 |
+ except subprocess.CalledProcessError:
|
|
84 |
+ # Failure trying to run bubblewrap
|
|
85 |
+ return False
|
|
82 | 86 |
_bwrap_major, _bwrap_minor, _bwrap_patch = map(int, version.split("."))
|
83 | 87 |
|
84 | 88 |
# Check whether the installed version meets the requirements
|
85 |
- if _bwrap_major > major:
|
|
86 |
- return True
|
|
87 |
- elif _bwrap_major < major:
|
|
88 |
- return False
|
|
89 |
- else:
|
|
90 |
- if _bwrap_minor > minor:
|
|
91 |
- return True
|
|
92 |
- elif _bwrap_minor < minor:
|
|
93 |
- return False
|
|
94 |
- else:
|
|
95 |
- return _bwrap_patch >= patch
|
|
89 |
+ return (_bwrap_major, _bwrap_minor, _bwrap_patch) >= (major, minor, patch)
|
... | ... | @@ -51,23 +51,40 @@ variables: |
51 | 51 |
|
52 | 52 |
--remove-section=.comment --remove-section=.note --strip-unneeded
|
53 | 53 |
|
54 |
+ # Causes strip-binaries to fail on error if set to true
|
|
55 |
+ fatal-strip: "false"
|
|
56 |
+ |
|
54 | 57 |
# Generic implementation for stripping debugging symbols
|
55 | 58 |
strip-binaries: |
|
56 |
- |
|
57 |
- cd "%{install-root}" && find -type f \
|
|
58 |
- '(' -perm -111 -o -name '*.so*' \
|
|
59 |
- -o -name '*.cmxs' -o -name '*.node' ')' \
|
|
60 |
- -exec sh -ec \
|
|
61 |
- 'read -n4 hdr <"$1" # check for elf header
|
|
62 |
- if [ "$hdr" != "$(printf \\x7fELF)" ]; then
|
|
63 |
- exit 0
|
|
64 |
- fi
|
|
65 |
- debugfile="%{install-root}%{debugdir}/$1"
|
|
66 |
- mkdir -p "$(dirname "$debugfile")"
|
|
67 |
- objcopy %{objcopy-extract-args} "$1" "$debugfile"
|
|
68 |
- chmod 644 "$debugfile"
|
|
69 |
- strip %{strip-args} "$1"
|
|
70 |
- objcopy %{objcopy-link-args} "$debugfile" "$1"' - {} ';'
|
|
59 |
+ find "%{install-root}" -type f \
|
|
60 |
+ '(' -perm -111 -o -name '*.so*' \
|
|
61 |
+ -o -name '*.cmxs' -o -name '*.node' ')' \
|
|
62 |
+ -print0 | while read -r -d $'\0' file; do
|
|
63 |
+ read -n4 hdr <"${file}" || continue # check for elf header
|
|
64 |
+ if [ "$hdr" != "$(printf \\x7fELF)" ]; then
|
|
65 |
+ continue
|
|
66 |
+ fi
|
|
67 |
+ if objdump -j .gnu_debuglink -s "${file}" &>/dev/null; then
|
|
68 |
+ continue
|
|
69 |
+ fi
|
|
70 |
+ case "${file}" in
|
|
71 |
+ "%{install-root}%{debugdir}/"*)
|
|
72 |
+ continue
|
|
73 |
+ ;;
|
|
74 |
+ *)
|
|
75 |
+ ;;
|
|
76 |
+ esac
|
|
77 |
+ realpath="$(realpath -s --relative-to="%{install-root}" "${file}")"
|
|
78 |
+ debugfile="%{install-root}%{debugdir}/${realpath}.debug"
|
|
79 |
+ mkdir -p "$(dirname "$debugfile")"
|
|
80 |
+ objcopy %{objcopy-extract-args} "${file}" "$debugfile"
|
|
81 |
+ chmod 644 "$debugfile"
|
|
82 |
+ mode="$(stat -c 0%a "${file}")"
|
|
83 |
+ [ -w "${file}" ] || chmod +w "${file}"
|
|
84 |
+ strip %{strip-args} "${file}" || %{fatal-strip}
|
|
85 |
+ objcopy %{objcopy-link-args} "$debugfile" "${file}"
|
|
86 |
+ chmod "${mode}" "${file}"
|
|
87 |
+ done
|
|
71 | 88 |
|
72 | 89 |
# Generic implementation for reproducible python builds
|
73 | 90 |
fix-pyc-timestamps: |
|
... | ... | @@ -191,4 +208,4 @@ shell: |
191 | 208 |
command: [ 'sh', '-i' ]
|
192 | 209 |
|
193 | 210 |
remote-execution:
|
194 |
- url: ""
|
|
\ No newline at end of file | ||
211 |
+ url: ""
|
... | ... | @@ -26,8 +26,13 @@ logdir: ${XDG_CACHE_HOME}/buildstream/logs |
26 | 26 |
# Cache
|
27 | 27 |
#
|
28 | 28 |
cache:
|
29 |
- # Size of the artifact cache - BuildStream will attempt to keep the
|
|
29 |
+ # Size of the artifact cache in bytes - BuildStream will attempt to keep the
|
|
30 | 30 |
# artifact cache within this size.
|
31 |
+ # If the value is suffixed with K, M, G or T, the specified memory size is
|
|
32 |
+ # parsed as Kilobytes, Megabytes, Gigabytes, or Terabytes (with the base
|
|
33 |
+ # 1024), respectively.
|
|
34 |
+ # Alternatively, a percentage value may be specified, which is taken relative
|
|
35 |
+ # to the size of the file system containing the cache.
|
|
31 | 36 |
quota: infinity
|
32 | 37 |
|
33 | 38 |
#
|
... | ... | @@ -1379,10 +1379,10 @@ class Element(Plugin): |
1379 | 1379 |
if not vdirectory.is_empty():
|
1380 | 1380 |
raise ElementError("Staging directory '{}' is not empty".format(vdirectory))
|
1381 | 1381 |
|
1382 |
- # While mkdtemp is advertised as using the TMP environment variable, it
|
|
1383 |
- # doesn't, so this explicit extraction is necesasry.
|
|
1384 |
- tmp_prefix = os.environ.get("TMP", None)
|
|
1385 |
- temp_staging_directory = tempfile.mkdtemp(prefix=tmp_prefix)
|
|
1382 |
+ # It's advantageous to have this temporary directory on
|
|
1383 |
+ # the same filing system as the rest of our cache.
|
|
1384 |
+ temp_staging_location = os.path.join(self._get_context().artifactdir, "staging_temp")
|
|
1385 |
+ temp_staging_directory = tempfile.mkdtemp(prefix=temp_staging_location)
|
|
1386 | 1386 |
|
1387 | 1387 |
try:
|
1388 | 1388 |
workspace = self._get_workspace()
|
... | ... | @@ -184,10 +184,18 @@ class GitMirror(SourceFetcher): |
184 | 184 |
cwd=self.mirror)
|
185 | 185 |
|
186 | 186 |
def fetch(self, alias_override=None):
|
187 |
- self.ensure(alias_override)
|
|
188 |
- if not self.has_ref():
|
|
189 |
- self._fetch(alias_override)
|
|
190 |
- self.assert_ref()
|
|
187 |
+ # Resolve the URL for the message
|
|
188 |
+ resolved_url = self.source.translate_url(self.url,
|
|
189 |
+ alias_override=alias_override,
|
|
190 |
+ primary=self.primary)
|
|
191 |
+ |
|
192 |
+ with self.source.timed_activity("Fetching from {}"
|
|
193 |
+ .format(resolved_url),
|
|
194 |
+ silent_nested=True):
|
|
195 |
+ self.ensure(alias_override)
|
|
196 |
+ if not self.has_ref():
|
|
197 |
+ self._fetch(alias_override)
|
|
198 |
+ self.assert_ref()
|
|
191 | 199 |
|
192 | 200 |
def has_ref(self):
|
193 | 201 |
if not self.ref:
|
... | ... | @@ -23,6 +23,7 @@ from . import Sandbox |
23 | 23 |
class SandboxDummy(Sandbox):
|
24 | 24 |
def __init__(self, *args, **kwargs):
|
25 | 25 |
super().__init__(*args, **kwargs)
|
26 |
+ self._reason = kwargs.get("dummy_reason", "no reason given")
|
|
26 | 27 |
|
27 | 28 |
def run(self, command, flags, *, cwd=None, env=None):
|
28 | 29 |
|
... | ... | @@ -37,4 +38,4 @@ class SandboxDummy(Sandbox): |
37 | 38 |
"'{}'".format(command[0]),
|
38 | 39 |
reason='missing-command')
|
39 | 40 |
|
40 |
- raise SandboxError("This platform does not support local builds")
|
|
41 |
+ raise SandboxError("This platform does not support local builds: {}".format(self._reason))
|
... | ... | @@ -177,15 +177,11 @@ class SandboxRemote(Sandbox): |
177 | 177 |
if not cascache.verify_digest_pushed(self._get_project(), upload_vdir.ref):
|
178 | 178 |
raise SandboxError("Failed to verify that source has been pushed to the remote artifact cache.")
|
179 | 179 |
|
180 |
- # Set up environment and working directory
|
|
181 |
- if cwd is None:
|
|
182 |
- cwd = self._get_work_directory()
|
|
183 |
- |
|
184 |
- if cwd is None:
|
|
185 |
- cwd = '/'
|
|
186 |
- |
|
187 |
- if env is None:
|
|
188 |
- env = self._get_environment()
|
|
180 |
+ # Fallback to the sandbox default settings for
|
|
181 |
+ # the cwd and env.
|
|
182 |
+ #
|
|
183 |
+ cwd = self._get_work_directory(cwd=cwd)
|
|
184 |
+ env = self._get_environment(cwd=cwd, env=env)
|
|
189 | 185 |
|
190 | 186 |
# We want command args as a list of strings
|
191 | 187 |
if isinstance(command, str):
|
... | ... | @@ -965,28 +965,48 @@ class Source(Plugin): |
965 | 965 |
# Tries to call fetch for every mirror, stopping once it succeeds
|
966 | 966 |
def __do_fetch(self, **kwargs):
|
967 | 967 |
project = self._get_project()
|
968 |
- source_fetchers = self.get_source_fetchers()
|
|
968 |
+ context = self._get_context()
|
|
969 |
+ |
|
970 |
+ # Silence the STATUS messages which might happen as a result
|
|
971 |
+ # of checking the source fetchers.
|
|
972 |
+ with context.silence():
|
|
973 |
+ source_fetchers = self.get_source_fetchers()
|
|
969 | 974 |
|
970 | 975 |
# Use the source fetchers if they are provided
|
971 | 976 |
#
|
972 | 977 |
if source_fetchers:
|
973 |
- for fetcher in source_fetchers:
|
|
974 |
- alias = fetcher._get_alias()
|
|
975 |
- for uri in project.get_alias_uris(alias, first_pass=self.__first_pass):
|
|
976 |
- try:
|
|
977 |
- fetcher.fetch(uri)
|
|
978 |
- # FIXME: Need to consider temporary vs. permanent failures,
|
|
979 |
- # and how this works with retries.
|
|
980 |
- except BstError as e:
|
|
981 |
- last_error = e
|
|
982 |
- continue
|
|
983 |
- |
|
984 |
- # No error, we're done with this fetcher
|
|
985 |
- break
|
|
986 | 978 |
|
987 |
- else:
|
|
988 |
- # No break occurred, raise the last detected error
|
|
989 |
- raise last_error
|
|
979 |
+ # Use a contorted loop here, this is to allow us to
|
|
980 |
+ # silence the messages which can result from consuming
|
|
981 |
+ # the items of source_fetchers, if it happens to be a generator.
|
|
982 |
+ #
|
|
983 |
+ source_fetchers = iter(source_fetchers)
|
|
984 |
+ try:
|
|
985 |
+ |
|
986 |
+ while True:
|
|
987 |
+ |
|
988 |
+ with context.silence():
|
|
989 |
+ fetcher = next(source_fetchers)
|
|
990 |
+ |
|
991 |
+ alias = fetcher._get_alias()
|
|
992 |
+ for uri in project.get_alias_uris(alias, first_pass=self.__first_pass):
|
|
993 |
+ try:
|
|
994 |
+ fetcher.fetch(uri)
|
|
995 |
+ # FIXME: Need to consider temporary vs. permanent failures,
|
|
996 |
+ # and how this works with retries.
|
|
997 |
+ except BstError as e:
|
|
998 |
+ last_error = e
|
|
999 |
+ continue
|
|
1000 |
+ |
|
1001 |
+ # No error, we're done with this fetcher
|
|
1002 |
+ break
|
|
1003 |
+ |
|
1004 |
+ else:
|
|
1005 |
+ # No break occurred, raise the last detected error
|
|
1006 |
+ raise last_error
|
|
1007 |
+ |
|
1008 |
+ except StopIteration:
|
|
1009 |
+ pass
|
|
990 | 1010 |
|
991 | 1011 |
# Default codepath is to reinstantiate the Source
|
992 | 1012 |
#
|
... | ... | @@ -502,7 +502,7 @@ def get_bst_version(): |
502 | 502 |
|
503 | 503 |
@contextmanager
|
504 | 504 |
def save_file_atomic(filename, mode='w', *, buffering=-1, encoding=None,
|
505 |
- errors=None, newline=None, closefd=True, opener=None):
|
|
505 |
+ errors=None, newline=None, closefd=True, opener=None, tempdir=None):
|
|
506 | 506 |
"""Save a file with a temporary name and rename it into place when ready.
|
507 | 507 |
|
508 | 508 |
This is a context manager which is meant for saving data to files.
|
... | ... | @@ -529,8 +529,9 @@ def save_file_atomic(filename, mode='w', *, buffering=-1, encoding=None, |
529 | 529 |
# https://bugs.python.org/issue8604
|
530 | 530 |
|
531 | 531 |
assert os.path.isabs(filename), "The utils.save_file_atomic() parameter ``filename`` must be an absolute path"
|
532 |
- dirname = os.path.dirname(filename)
|
|
533 |
- fd, tempname = tempfile.mkstemp(dir=dirname)
|
|
532 |
+ if tempdir is None:
|
|
533 |
+ tempdir = os.path.dirname(filename)
|
|
534 |
+ fd, tempname = tempfile.mkstemp(dir=tempdir)
|
|
534 | 535 |
os.close(fd)
|
535 | 536 |
|
536 | 537 |
f = open(tempname, mode=mode, buffering=buffering, encoding=encoding,
|
... | ... | @@ -562,6 +563,9 @@ def save_file_atomic(filename, mode='w', *, buffering=-1, encoding=None, |
562 | 563 |
#
|
563 | 564 |
# Get the disk usage of a given directory in bytes.
|
564 | 565 |
#
|
566 |
+# This function assumes that files do not inadvertantly
|
|
567 |
+# disappear while this function is running.
|
|
568 |
+#
|
|
565 | 569 |
# Arguments:
|
566 | 570 |
# (str) The path whose size to check.
|
567 | 571 |
#
|
... | ... | @@ -54,12 +54,13 @@ REQUIRED_BWRAP_MINOR = 1 |
54 | 54 |
REQUIRED_BWRAP_PATCH = 2
|
55 | 55 |
|
56 | 56 |
|
57 |
-def exit_bwrap(reason):
|
|
57 |
+def warn_bwrap(reason):
|
|
58 | 58 |
print(reason +
|
59 |
- "\nBuildStream requires Bubblewrap (bwrap) for"
|
|
60 |
- " sandboxing the build environment. Install it using your package manager"
|
|
61 |
- " (usually bwrap or bubblewrap)")
|
|
62 |
- sys.exit(1)
|
|
59 |
+ "\nBuildStream requires Bubblewrap (bwrap {}.{}.{} or better),"
|
|
60 |
+ " during local builds, for"
|
|
61 |
+ " sandboxing the build environment.\nInstall it using your package manager"
|
|
62 |
+ " (usually bwrap or bubblewrap) otherwise you will be limited to"
|
|
63 |
+ " remote builds only.".format(REQUIRED_BWRAP_MAJOR, REQUIRED_BWRAP_MINOR, REQUIRED_BWRAP_PATCH))
|
|
63 | 64 |
|
64 | 65 |
|
65 | 66 |
def bwrap_too_old(major, minor, patch):
|
... | ... | @@ -76,18 +77,19 @@ def bwrap_too_old(major, minor, patch): |
76 | 77 |
return False
|
77 | 78 |
|
78 | 79 |
|
79 |
-def assert_bwrap():
|
|
80 |
+def check_for_bwrap():
|
|
80 | 81 |
platform = os.environ.get('BST_FORCE_BACKEND', '') or sys.platform
|
81 | 82 |
if platform.startswith('linux'):
|
82 | 83 |
bwrap_path = shutil.which('bwrap')
|
83 | 84 |
if not bwrap_path:
|
84 |
- exit_bwrap("Bubblewrap not found")
|
|
85 |
+ warn_bwrap("Bubblewrap not found")
|
|
86 |
+ return
|
|
85 | 87 |
|
86 | 88 |
version_bytes = subprocess.check_output([bwrap_path, "--version"]).split()[1]
|
87 | 89 |
version_string = str(version_bytes, "utf-8")
|
88 | 90 |
major, minor, patch = map(int, version_string.split("."))
|
89 | 91 |
if bwrap_too_old(major, minor, patch):
|
90 |
- exit_bwrap("Bubblewrap too old")
|
|
92 |
+ warn_bwrap("Bubblewrap too old")
|
|
91 | 93 |
|
92 | 94 |
|
93 | 95 |
###########################################
|
... | ... | @@ -126,7 +128,7 @@ bst_install_entry_points = { |
126 | 128 |
}
|
127 | 129 |
|
128 | 130 |
if not os.environ.get('BST_ARTIFACTS_ONLY', ''):
|
129 |
- assert_bwrap()
|
|
131 |
+ check_for_bwrap()
|
|
130 | 132 |
bst_install_entry_points['console_scripts'] += [
|
131 | 133 |
'bst = buildstream._frontend:cli'
|
132 | 134 |
]
|
1 |
-dd5e29baefb84f68eb4abac3a1befc332077ec4c97bb2572e57f3ca98ba46707
|
|
\ No newline at end of file | ||
1 |
+c8beac45697ce777d63ff356d7af430f718ee78264bb2b6a41ebddf4f6aeed53
|
|
\ No newline at end of file |
1 |
-99d80454cce44645597c885800edf0bf254d1c3606d869f2ccdd5043ec7685cb
|
|
\ No newline at end of file | ||
1 |
+7ba21f3a594318d9138befa4ecdb11169b791074efc44edf6729667f7e024ca4
|
|
\ No newline at end of file |
1 |
-29a1252ec30dd6ae73c772381f0eb417e3874c75710d08be819f5715dcaa942b
|
|
\ No newline at end of file | ||
1 |
+363201be403127a4e24ac079f0390e90cd80953c91ab6b26b60985d51c4d2e91
|
|
\ No newline at end of file |
... | ... | @@ -139,6 +139,82 @@ def test_mirror_fetch(cli, tmpdir, datafiles, kind): |
139 | 139 |
result.assert_success()
|
140 | 140 |
|
141 | 141 |
|
142 |
+@pytest.mark.datafiles(DATA_DIR)
|
|
143 |
+@pytest.mark.parametrize("ref_storage", [("inline"), ("project.refs")])
|
|
144 |
+@pytest.mark.parametrize("mirror", [("no-mirror"), ("mirror"), ("unrelated-mirror")])
|
|
145 |
+def test_mirror_fetch_ref_storage(cli, tmpdir, datafiles, ref_storage, mirror):
|
|
146 |
+ bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr')
|
|
147 |
+ dev_files_path = os.path.join(str(datafiles), 'files', 'dev-files', 'usr')
|
|
148 |
+ upstream_repodir = os.path.join(str(tmpdir), 'upstream')
|
|
149 |
+ mirror_repodir = os.path.join(str(tmpdir), 'mirror')
|
|
150 |
+ project_dir = os.path.join(str(tmpdir), 'project')
|
|
151 |
+ os.makedirs(project_dir)
|
|
152 |
+ element_dir = os.path.join(project_dir, 'elements')
|
|
153 |
+ |
|
154 |
+ # Create repo objects of the upstream and mirror
|
|
155 |
+ upstream_repo = create_repo('tar', upstream_repodir)
|
|
156 |
+ upstream_ref = upstream_repo.create(bin_files_path)
|
|
157 |
+ mirror_repo = upstream_repo.copy(mirror_repodir)
|
|
158 |
+ mirror_ref = upstream_ref
|
|
159 |
+ upstream_ref = upstream_repo.create(dev_files_path)
|
|
160 |
+ |
|
161 |
+ element = {
|
|
162 |
+ 'kind': 'import',
|
|
163 |
+ 'sources': [
|
|
164 |
+ upstream_repo.source_config(ref=upstream_ref if ref_storage == 'inline' else None)
|
|
165 |
+ ]
|
|
166 |
+ }
|
|
167 |
+ element_name = 'test.bst'
|
|
168 |
+ element_path = os.path.join(element_dir, element_name)
|
|
169 |
+ full_repo = element['sources'][0]['url']
|
|
170 |
+ upstream_map, repo_name = os.path.split(full_repo)
|
|
171 |
+ alias = 'foo'
|
|
172 |
+ aliased_repo = alias + ':' + repo_name
|
|
173 |
+ element['sources'][0]['url'] = aliased_repo
|
|
174 |
+ full_mirror = mirror_repo.source_config()['url']
|
|
175 |
+ mirror_map, _ = os.path.split(full_mirror)
|
|
176 |
+ os.makedirs(element_dir)
|
|
177 |
+ _yaml.dump(element, element_path)
|
|
178 |
+ |
|
179 |
+ if ref_storage == 'project.refs':
|
|
180 |
+ # Manually set project.refs to avoid caching the repo prematurely
|
|
181 |
+ project_refs = {'projects': {
|
|
182 |
+ 'test': {
|
|
183 |
+ element_name: [
|
|
184 |
+ {'ref': upstream_ref}
|
|
185 |
+ ]
|
|
186 |
+ }
|
|
187 |
+ }}
|
|
188 |
+ project_refs_path = os.path.join(project_dir, 'project.refs')
|
|
189 |
+ _yaml.dump(project_refs, project_refs_path)
|
|
190 |
+ |
|
191 |
+ project = {
|
|
192 |
+ 'name': 'test',
|
|
193 |
+ 'element-path': 'elements',
|
|
194 |
+ 'aliases': {
|
|
195 |
+ alias: upstream_map + "/"
|
|
196 |
+ },
|
|
197 |
+ 'ref-storage': ref_storage
|
|
198 |
+ }
|
|
199 |
+ if mirror != 'no-mirror':
|
|
200 |
+ mirror_data = [{
|
|
201 |
+ 'name': 'middle-earth',
|
|
202 |
+ 'aliases': {alias: [mirror_map + '/']}
|
|
203 |
+ }]
|
|
204 |
+ if mirror == 'unrelated-mirror':
|
|
205 |
+ mirror_data.insert(0, {
|
|
206 |
+ 'name': 'narnia',
|
|
207 |
+ 'aliases': {'frob': ['http://www.example.com/repo']}
|
|
208 |
+ })
|
|
209 |
+ project['mirrors'] = mirror_data
|
|
210 |
+ |
|
211 |
+ project_file = os.path.join(project_dir, 'project.conf')
|
|
212 |
+ _yaml.dump(project, project_file)
|
|
213 |
+ |
|
214 |
+ result = cli.run(project=project_dir, args=['fetch', element_name])
|
|
215 |
+ result.assert_success()
|
|
216 |
+ |
|
217 |
+ |
|
142 | 218 |
@pytest.mark.datafiles(DATA_DIR)
|
143 | 219 |
@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
|
144 | 220 |
def test_mirror_fetch_upstream_absent(cli, tmpdir, datafiles, kind):
|