Tiago Gomes pushed to branch tiagogomes/docs-improvements at BuildStream / buildstream
Commits:
- 14e1a3b3 by Jürg Billeter at 2018-10-01T08:18:05Z
- 232662f1 by Jürg Billeter at 2018-10-01T08:53:06Z
- f447aedd by Tiago Gomes at 2018-10-01T10:33:11Z
- 682dddce by Tiago Gomes at 2018-10-01T10:35:12Z
- fafa8136 by Tiago Gomes at 2018-10-01T10:59:54Z
- 26e1a3c7 by Jürg Billeter at 2018-10-01T14:58:06Z
- f47895c0 by Jürg Billeter at 2018-10-01T14:58:06Z
- cf00c0a1 by Jürg Billeter at 2018-10-01T15:32:30Z
- 5f4ae90b by Jürg Billeter at 2018-10-02T06:34:02Z
- 0458bc4e by Jürg Billeter at 2018-10-02T07:08:35Z
- d5b396e0 by Phillip Smyth at 2018-10-02T16:40:18Z
- dae842fd by Phillip Smyth at 2018-10-02T17:14:09Z
- 8dc16d3f by Jürg Billeter at 2018-10-03T05:08:21Z
- 66446fc3 by Jürg Billeter at 2018-10-03T05:36:38Z
- 29c19bea by Tristan Van Berkom at 2018-10-03T07:33:48Z
- b645881c by Tristan Van Berkom at 2018-10-03T07:33:48Z
- c9437616 by Tristan Van Berkom at 2018-10-03T08:07:15Z
- 11320fe2 by Tristan Van Berkom at 2018-10-03T09:33:39Z
- 91271964 by Tristan Van Berkom at 2018-10-03T09:59:40Z
- 3bf895d2 by Jonathan Maw at 2018-10-03T11:48:48Z
- e4969807 by Jonathan Maw at 2018-10-03T12:48:07Z
- a0814399 by Tristan Van Berkom at 2018-10-03T13:05:52Z
- 0a1f8e3c by Tristan Van Berkom at 2018-10-03T13:42:20Z
- 11161f99 by Tristan Van Berkom at 2018-10-03T13:44:02Z
- 3e797bb9 by Tristan Van Berkom at 2018-10-03T13:44:02Z
- d9020e43 by Tristan Van Berkom at 2018-10-03T13:44:02Z
- 3e5ff5a9 by Tristan Van Berkom at 2018-10-03T14:09:51Z
- 3247d22b by Tiago Gomes at 2018-10-03T15:39:00Z
- 875496d2 by Tiago Gomes at 2018-10-03T15:39:00Z
24 changed files:
- .gitlab-ci.yml
- buildstream/_artifactcache/cascache.py
- buildstream/_artifactcache/casserver.py
- buildstream/_platform/darwin.py
- buildstream/_platform/linux.py
- buildstream/_platform/platform.py
- buildstream/_scheduler/jobs/job.py
- buildstream/_scheduler/queues/queue.py
- buildstream/_scheduler/scheduler.py
- buildstream/plugins/sources/git.py
- buildstream/sandbox/_sandboxremote.py
- buildstream/source.py
- buildstream/utils.py
- doc/source/index.rst
- doc/source/install_docker.rst (deleted)
- doc/source/install_linux_distro.rst (deleted)
- doc/source/install_source.rst (deleted)
- doc/source/install_versions.rst (deleted)
- doc/source/main_install.rst (deleted)
- doc/source/main_using.rst
- doc/source/release-badge.rst (deleted)
- doc/source/snapshot-badge.rst (deleted)
- doc/source/install_artifacts.rst → doc/source/using_configuring_artifact_server.rst
- tests/frontend/mirror.py
Changes:
.gitlab-ci.yml
@@ -145,7 +145,8 @@ docs:
   stage: test
   script:
   - export BST_SOURCE_CACHE="$(pwd)/cache/integration-cache/sources"
-  - pip3 install sphinx
+  # Currently sphinx_rtd_theme does not support Sphinx >1.8, this breaks search functionality
+  - pip3 install sphinx==1.7.9
   - pip3 install sphinx-click
   - pip3 install sphinx_rtd_theme
   - cd dist && ./unpack.sh && cd buildstream
@@ -161,14 +162,14 @@ docs:
 .overnight-tests: &overnight-tests-template
   stage: test
   variables:
-    bst_ext_url: git+https://gitlab.com/BuildStream/bst-external.git
-    bst_ext_ref: 1d6ab71151b93c8cbc0a91a36ffe9270f3b835f1 # 0.5.1
-    fd_sdk_ref: 88d7c22c2281b987faa02edd57df80d430eecf1f # 18.08.12
+    BST_EXT_URL: git+https://gitlab.com/BuildStream/bst-external.git
+    BST_EXT_REF: 1d6ab71151b93c8cbc0a91a36ffe9270f3b835f1 # 0.5.1
+    FD_SDK_REF: 88d7c22c2281b987faa02edd57df80d430eecf1f # 18.08.11-35-g88d7c22c
   before_script:
   - (cd dist && ./unpack.sh && cd buildstream && pip3 install .)
-  - pip3 install --user -e ${bst_ext_url}@${bst_ext_ref}#egg=bst_ext
+  - pip3 install --user -e ${BST_EXT_URL}@${BST_EXT_REF}#egg=bst_ext
   - git clone https://gitlab.com/freedesktop-sdk/freedesktop-sdk.git
-  - git -C freedesktop-sdk checkout ${fd_sdk_ref}
+  - git -C freedesktop-sdk checkout ${FD_SDK_REF}
   only:
   - schedules
buildstream/_artifactcache/cascache.py
@@ -506,7 +506,7 @@ class CASCache(ArtifactCache):
     def set_ref(self, ref, tree):
         refpath = self._refpath(ref)
         os.makedirs(os.path.dirname(refpath), exist_ok=True)
-        with utils.save_file_atomic(refpath, 'wb') as f:
+        with utils.save_file_atomic(refpath, 'wb', tempdir=self.tmpdir) as f:
            f.write(tree.SerializeToString())

    # resolve_ref():
@@ -1048,10 +1048,29 @@ class CASCache(ArtifactCache):
             missing_blobs[d.hash] = d

         # Upload any blobs missing on the server
-        for blob_digest in missing_blobs.values():
-            with open(self.objpath(blob_digest), 'rb') as f:
-                assert os.fstat(f.fileno()).st_size == blob_digest.size_bytes
-                self._send_blob(remote, blob_digest, f, u_uid=u_uid)
+        self._send_blobs(remote, missing_blobs.values(), u_uid)
+
+    def _send_blobs(self, remote, digests, u_uid=uuid.uuid4()):
+        batch = _CASBatchUpdate(remote)
+
+        for digest in digests:
+            with open(self.objpath(digest), 'rb') as f:
+                assert os.fstat(f.fileno()).st_size == digest.size_bytes
+
+                if (digest.size_bytes >= remote.max_batch_total_size_bytes or
+                        not remote.batch_update_supported):
+                    # Too large for batch request, upload in independent request.
+                    self._send_blob(remote, digest, f, u_uid=u_uid)
+                else:
+                    if not batch.add(digest, f):
+                        # Not enough space left in batch request.
+                        # Complete pending batch first.
+                        batch.send()
+                        batch = _CASBatchUpdate(remote)
+                        batch.add(digest, f)
+
+        # Send final batch
+        batch.send()


 # Represents a single remote CAS cache.
@@ -1126,6 +1145,17 @@ class _CASRemote():
             if e.code() != grpc.StatusCode.UNIMPLEMENTED:
                 raise

+            # Check whether the server supports BatchUpdateBlobs()
+            self.batch_update_supported = False
+            try:
+                request = remote_execution_pb2.BatchUpdateBlobsRequest()
+                response = self.cas.BatchUpdateBlobs(request)
+                self.batch_update_supported = True
+            except grpc.RpcError as e:
+                if (e.code() != grpc.StatusCode.UNIMPLEMENTED and
+                        e.code() != grpc.StatusCode.PERMISSION_DENIED):
+                    raise
+
             self._initialized = True


@@ -1173,6 +1203,46 @@ class _CASBatchRead():
             yield (response.digest, response.data)


+# Represents a batch of blobs queued for upload.
+#
+class _CASBatchUpdate():
+    def __init__(self, remote):
+        self._remote = remote
+        self._max_total_size_bytes = remote.max_batch_total_size_bytes
+        self._request = remote_execution_pb2.BatchUpdateBlobsRequest()
+        self._size = 0
+        self._sent = False
+
+    def add(self, digest, stream):
+        assert not self._sent
+
+        new_batch_size = self._size + digest.size_bytes
+        if new_batch_size > self._max_total_size_bytes:
+            # Not enough space left in current batch
+            return False
+
+        blob_request = self._request.requests.add()
+        blob_request.digest.hash = digest.hash
+        blob_request.digest.size_bytes = digest.size_bytes
+        blob_request.data = stream.read(digest.size_bytes)
+        self._size = new_batch_size
+        return True
+
+    def send(self):
+        assert not self._sent
+        self._sent = True
+
+        if len(self._request.requests) == 0:
+            return
+
+        batch_response = self._remote.cas.BatchUpdateBlobs(self._request)
+
+        for response in batch_response.responses:
+            if response.status.code != grpc.StatusCode.OK.value[0]:
+                raise ArtifactError("Failed to upload blob {}: {}".format(
+                    response.digest.hash, response.status.code))
+
+
 def _grouper(iterable, n):
     while True:
         try:
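The upload path above follows a simple fill-and-flush pattern: blobs that fit are accumulated into a batch until the size limit would be exceeded, the pending batch is flushed, and oversized blobs bypass batching entirely. A minimal standalone sketch of that flow (the Batch class and the 100-byte limit are illustrative stand-ins, not BuildStream API):

    # Sketch of the fill/flush batching used by _send_blobs() above.
    MAX_BATCH_BYTES = 100  # hypothetical limit; the real one comes from server capabilities

    class Batch:
        def __init__(self):
            self.items, self.size = [], 0

        def add(self, name, size):
            if self.size + size > MAX_BATCH_BYTES:
                return False            # Caller must flush and start a new batch
            self.items.append(name)
            self.size += size
            return True

        def send(self):
            if self.items:              # An empty final batch is a no-op
                print("batch:", self.items)

    def send_all(blobs):
        batch = Batch()
        for name, size in blobs:
            if size >= MAX_BATCH_BYTES:
                print("single:", name)  # Too large for any batch; send individually
            elif not batch.add(name, size):
                batch.send()            # Complete the pending batch first
                batch = Batch()
                batch.add(name, size)
        batch.send()                    # Flush the final, partially filled batch

    send_all([("a", 40), ("b", 40), ("c", 40), ("d", 120)])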
buildstream/_artifactcache/casserver.py
@@ -68,7 +68,7 @@ def create_server(repo, *, enable_push):
         _ByteStreamServicer(artifactcache, enable_push=enable_push), server)

     remote_execution_pb2_grpc.add_ContentAddressableStorageServicer_to_server(
-        _ContentAddressableStorageServicer(artifactcache), server)
+        _ContentAddressableStorageServicer(artifactcache, enable_push=enable_push), server)

     remote_execution_pb2_grpc.add_CapabilitiesServicer_to_server(
         _CapabilitiesServicer(), server)
@@ -222,9 +222,10 @@ class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):


 class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddressableStorageServicer):
-    def __init__(self, cas):
+    def __init__(self, cas, *, enable_push):
         super().__init__()
         self.cas = cas
+        self.enable_push = enable_push

     def FindMissingBlobs(self, request, context):
         response = remote_execution_pb2.FindMissingBlobsResponse()
@@ -260,6 +261,46 @@ class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddressableStorageServicer):

         return response

+    def BatchUpdateBlobs(self, request, context):
+        response = remote_execution_pb2.BatchUpdateBlobsResponse()
+
+        if not self.enable_push:
+            context.set_code(grpc.StatusCode.PERMISSION_DENIED)
+            return response
+
+        batch_size = 0
+
+        for blob_request in request.requests:
+            digest = blob_request.digest
+
+            batch_size += digest.size_bytes
+            if batch_size > _MAX_PAYLOAD_BYTES:
+                context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
+                return response
+
+            blob_response = response.responses.add()
+            blob_response.digest.hash = digest.hash
+            blob_response.digest.size_bytes = digest.size_bytes
+
+            if len(blob_request.data) != digest.size_bytes:
+                blob_response.status.code = grpc.StatusCode.FAILED_PRECONDITION
+                continue
+
+            try:
+                _clean_up_cache(self.cas, digest.size_bytes)
+
+                with tempfile.NamedTemporaryFile(dir=self.cas.tmpdir) as out:
+                    out.write(blob_request.data)
+                    out.flush()
+                    server_digest = self.cas.add_object(path=out.name)
+                    if server_digest.hash != digest.hash:
+                        blob_response.status.code = grpc.StatusCode.FAILED_PRECONDITION
+
+            except ArtifactTooLargeException:
+                blob_response.status.code = grpc.StatusCode.RESOURCE_EXHAUSTED
+
+        return response
+

 class _CapabilitiesServicer(remote_execution_pb2_grpc.CapabilitiesServicer):
     def GetCapabilities(self, request, context):
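Note the two error layers in BatchUpdateBlobs(): request-level problems (push disabled, oversized payload) are reported through the gRPC context, while per-blob problems (size mismatch, hash mismatch, cache full) land in each blob's status field, so one bad blob does not fail the whole batch. A hedged sketch of how a client might split such a response (the helper name is hypothetical; the response shape follows the remote execution protos used above):

    import grpc

    # Hypothetical helper: partition a BatchUpdateBlobsResponse into
    # succeeded digests and (digest, status-code) failures.
    def partition_results(batch_response):
        ok_code = grpc.StatusCode.OK.value[0]   # numeric code, i.e. 0
        succeeded, failed = [], []
        for blob_response in batch_response.responses:
            if blob_response.status.code == ok_code:
                succeeded.append(blob_response.digest)
            else:
                failed.append((blob_response.digest, blob_response.status.code))
        return succeeded, failed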
buildstream/_platform/darwin.py
@@ -41,10 +41,11 @@ class Darwin(Platform):
         return True

     def get_cpu_count(self, cap=None):
-        if cap < os.cpu_count():
-            return cap
+        cpu_count = os.cpu_count()
+        if cap is None:
+            return cpu_count
         else:
-            return os.cpu_count()
+            return min(cpu_count, cap)

     def set_resource_limits(self, soft_limit=OPEN_MAX, hard_limit=None):
         super().set_resource_limits(soft_limit)
buildstream/_platform/linux.py
@@ -55,6 +55,10 @@ class Linux(Platform):
         return SandboxBwrap(*args, **kwargs)

     def check_sandbox_config(self, config):
+        if not self._local_sandbox_available():
+            # Accept all sandbox configs as it's irrelevant with the dummy sandbox (no Sandbox.run).
+            return True
+
         if self._user_ns_available:
             # User namespace support allows arbitrary build UID/GID settings.
             return True
buildstream/_platform/platform.py
@@ -67,7 +67,11 @@ class Platform():
         return cls._instance

     def get_cpu_count(self, cap=None):
-        return min(len(os.sched_getaffinity(0)), cap)
+        cpu_count = len(os.sched_getaffinity(0))
+        if cap is None:
+            return cpu_count
+        else:
+            return min(cpu_count, cap)

     ##################################################################
     #                       Sandbox functions                        #
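The Darwin variant previously evaluated `cap < os.cpu_count()` even when cap was None, which raises a TypeError on Python 3; both platforms now share the same semantics: uncapped by default, otherwise the minimum of the available CPUs and the cap. A quick standalone sketch of the fixed behaviour (os.sched_getaffinity() is Linux-only, which is why Darwin falls back to os.cpu_count()):

    import os

    # Mirrors the fixed Platform.get_cpu_count() semantics shown above.
    def get_cpu_count(cap=None):
        cpu_count = len(os.sched_getaffinity(0))  # CPUs this process may use (Linux)
        return cpu_count if cap is None else min(cpu_count, cap)

    print(get_cpu_count())     # e.g. 8 on an 8-core machine
    print(get_cpu_count(4))    # capped: at most 4
    print(get_cpu_count(64))   # cap larger than the machine: still 8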
buildstream/_scheduler/jobs/job.py
@@ -119,6 +119,8 @@ class Job():
         self._result = None            # Return value of child action in the parent
         self._tries = 0                # Try count, for retryable jobs
         self._skipped_flag = False     # Indicate whether the job was skipped.
+        self._terminated = False       # Whether this job has been explicitly terminated
+
         # If False, a retry will not be attempted regardless of whether _tries is less than _max_retries.
         #
         self._retry_flag = True
@@ -190,6 +192,8 @@ class Job():
         # Terminate the process using multiprocessing API pathway
         self._process.terminate()

+        self._terminated = True
+
     # terminate_wait()
     #
     # Wait for terminated jobs to complete
@@ -273,18 +277,22 @@ class Job():
     # running the integration commands).
     #
     # Args:
-    #     (int): The plugin identifier for this task
+    #     task_id (int): The plugin identifier for this task
     #
     def set_task_id(self, task_id):
         self._task_id = task_id

     # skipped
     #
+    # This will evaluate to True if the job was skipped
+    # during processing, or if it was forcefully terminated.
+    #
     # Returns:
-    #     bool: True if the job was skipped while processing.
+    #     (bool): Whether the job should appear as skipped
+    #
     @property
     def skipped(self):
-        return self._skipped_flag
+        return self._skipped_flag or self._terminated

     #######################################################
     #                  Abstract Methods                   #
buildstream/_scheduler/queues/queue.py
@@ -326,16 +326,20 @@ class Queue():
                                 detail=traceback.format_exc())
             self.failed_elements.append(element)
         else:
-
-            # No exception occured, handle the success/failure state in the normal way
             #
+            # No exception occured in post processing
+            #
+
+            # All jobs get placed on the done queue for later processing.
             self._done_queue.append(job)

-            if success:
-                if not job.skipped:
-                    self.processed_elements.append(element)
-                else:
-                    self.skipped_elements.append(element)
+            # A Job can be skipped whether or not it has failed,
+            # we want to only bookkeep them as processed or failed
+            # if they are not skipped.
+            if job.skipped:
+                self.skipped_elements.append(element)
+            elif success:
+                self.processed_elements.append(element)
             else:
                 self.failed_elements.append(element)
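Taken together with the Job changes above, a terminated job now reports itself as skipped, and the reordered bookkeeping gives skipped precedence over success or failure. The decision reduces to a small table; this sketch is illustrative only, not Queue API:

    # Illustrative precedence of the new bookkeeping: skipped wins,
    # matching Job.skipped (= _skipped_flag or _terminated).
    def classify(skipped, success):
        if skipped:
            return 'skipped_elements'
        return 'processed_elements' if success else 'failed_elements'

    assert classify(skipped=True, success=False) == 'skipped_elements'    # e.g. terminated job
    assert classify(skipped=False, success=True) == 'processed_elements'
    assert classify(skipped=False, success=False) == 'failed_elements'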
buildstream/_scheduler/scheduler.py
@@ -387,6 +387,15 @@ class Scheduler():
     # A loop registered event callback for keyboard interrupts
     #
     def _interrupt_event(self):
+
+        # FIXME: This should not be needed, but for some reason we receive an
+        #        additional SIGINT event when the user hits ^C a second time
+        #        to inform us that they really intend to terminate; even though
+        #        we have disconnected our handlers at this time.
+        #
+        if self.terminated:
+            return
+
         # Leave this to the frontend to decide, if no
         # interrrupt callback was specified, then just terminate.
         if self._interrupt_callback:
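The guard makes the interrupt callback idempotent: once the scheduler is terminating, any stray SIGINT is ignored rather than re-entering the termination logic. A minimal standalone sketch of that pattern (not Scheduler API):

    import signal

    terminated = False

    def on_interrupt(signum, frame):
        global terminated
        if terminated:
            return              # Already terminating; swallow the extra SIGINT
        terminated = True
        print("terminating jobs...")

    signal.signal(signal.SIGINT, on_interrupt)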
buildstream/plugins/sources/git.py
@@ -184,10 +184,18 @@ class GitMirror(SourceFetcher):
                          cwd=self.mirror)

     def fetch(self, alias_override=None):
-        self.ensure(alias_override)
-        if not self.has_ref():
-            self._fetch(alias_override)
-        self.assert_ref()
+        # Resolve the URL for the message
+        resolved_url = self.source.translate_url(self.url,
+                                                 alias_override=alias_override,
+                                                 primary=self.primary)
+
+        with self.source.timed_activity("Fetching from {}"
+                                        .format(resolved_url),
+                                        silent_nested=True):
+            self.ensure(alias_override)
+            if not self.has_ref():
+                self._fetch(alias_override)
+            self.assert_ref()

     def has_ref(self):
         if not self.ref:
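The effect of this change is that the whole ensure/fetch/assert sequence is reported as a single timed activity against the resolved URL, with nested messages silenced. A rough stand-in for what Source.timed_activity() provides (the implementation below is illustrative only):

    import time
    from contextlib import contextmanager

    # Illustrative stand-in for Source.timed_activity(): one message and
    # one elapsed time for the whole fetch sequence.
    @contextmanager
    def timed_activity(message):
        start = time.monotonic()
        print(message)
        try:
            yield
        finally:
            print("{} ({:.2f}s)".format(message, time.monotonic() - start))

    with timed_activity("Fetching from https://example.com/repo.git"):
        time.sleep(0.1)  # ensure mirror, fetch if the ref is missing, assert it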
buildstream/sandbox/_sandboxremote.py
@@ -177,15 +177,11 @@ class SandboxRemote(Sandbox):
         if not cascache.verify_digest_pushed(self._get_project(), upload_vdir.ref):
             raise SandboxError("Failed to verify that source has been pushed to the remote artifact cache.")

-        # Set up environment and working directory
-        if cwd is None:
-            cwd = self._get_work_directory()
-
-        if cwd is None:
-            cwd = '/'
-
-        if env is None:
-            env = self._get_environment()
+        # Fallback to the sandbox default settings for
+        # the cwd and env.
+        #
+        cwd = self._get_work_directory(cwd=cwd)
+        env = self._get_environment(cwd=cwd, env=env)

         # We want command args as a list of strings
         if isinstance(command, str):
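The refactor replaces open-coded None checks with helpers that apply the sandbox defaults themselves, so remote and local sandboxes resolve cwd and env identically. The underlying idiom is "explicit value wins, otherwise fall back"; a sketch under the assumption that the helpers behave like this (the bodies below are illustrative, not the Sandbox internals):

    # "Explicit value wins, otherwise fall back" helpers, as assumed above.
    def get_work_directory(cwd=None, default='/'):
        return cwd if cwd is not None else default

    def get_environment(env=None, default_env=None):
        return dict(env) if env is not None else dict(default_env or {})

    assert get_work_directory() == '/'
    assert get_work_directory(cwd='/buildstream-build') == '/buildstream-build'
    assert get_environment(default_env={'PATH': '/usr/bin'}) == {'PATH': '/usr/bin'}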
buildstream/source.py
@@ -965,28 +965,48 @@ class Source(Plugin):
     # Tries to call fetch for every mirror, stopping once it succeeds
     def __do_fetch(self, **kwargs):
         project = self._get_project()
-        source_fetchers = self.get_source_fetchers()
+        context = self._get_context()
+
+        # Silence the STATUS messages which might happen as a result
+        # of checking the source fetchers.
+        with context.silence():
+            source_fetchers = self.get_source_fetchers()

         # Use the source fetchers if they are provided
         #
         if source_fetchers:
-            for fetcher in source_fetchers:
-                alias = fetcher._get_alias()
-                for uri in project.get_alias_uris(alias, first_pass=self.__first_pass):
-                    try:
-                        fetcher.fetch(uri)
-                    # FIXME: Need to consider temporary vs. permanent failures,
-                    #        and how this works with retries.
-                    except BstError as e:
-                        last_error = e
-                        continue
-
-                    # No error, we're done with this fetcher
-                    break

-            else:
-                # No break occurred, raise the last detected error
-                raise last_error
+            # Use a contorted loop here, this is to allow us to
+            # silence the messages which can result from consuming
+            # the items of source_fetchers, if it happens to be a generator.
+            #
+            source_fetchers = iter(source_fetchers)
+            try:
+
+                while True:
+
+                    with context.silence():
+                        fetcher = next(source_fetchers)
+
+                    alias = fetcher._get_alias()
+                    for uri in project.get_alias_uris(alias, first_pass=self.__first_pass):
+                        try:
+                            fetcher.fetch(uri)
+                        # FIXME: Need to consider temporary vs. permanent failures,
+                        #        and how this works with retries.
+                        except BstError as e:
+                            last_error = e
+                            continue

+                        # No error, we're done with this fetcher
+                        break
+
+                    else:
+                        # No break occurred, raise the last detected error
+                        raise last_error
+
+            except StopIteration:
+                pass

     # Default codepath is to reinstantiate the Source
     #
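The "contorted loop" exists because a generator's body runs while items are pulled from it: only the next() call is wrapped in the silencing context, so messages emitted while producing a fetcher are suppressed, while messages from fetcher.fetch() itself still reach the user. A standalone sketch of the pattern (silence() here is a stand-in for Context.silence()):

    from contextlib import contextmanager

    @contextmanager
    def silence():          # Stand-in for Context.silence()
        yield               # A real implementation would mute messages here

    def fetchers():         # May emit STATUS messages while producing items
        yield "fetcher-1"
        yield "fetcher-2"

    it = iter(fetchers())
    try:
        while True:
            with silence():                  # Only item *production* is silenced
                fetcher = next(it)
            print("fetching with", fetcher)  # Normal messaging resumes here
    except StopIteration:
        pass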
buildstream/utils.py
@@ -502,7 +502,7 @@ def get_bst_version():

 @contextmanager
 def save_file_atomic(filename, mode='w', *, buffering=-1, encoding=None,
-                     errors=None, newline=None, closefd=True, opener=None):
+                     errors=None, newline=None, closefd=True, opener=None, tempdir=None):
     """Save a file with a temporary name and rename it into place when ready.

     This is a context manager which is meant for saving data to files.
@@ -529,8 +529,9 @@ def save_file_atomic(filename, mode='w', *, buffering=-1, encoding=None,
     # https://bugs.python.org/issue8604

     assert os.path.isabs(filename), "The utils.save_file_atomic() parameter ``filename`` must be an absolute path"
-    dirname = os.path.dirname(filename)
-    fd, tempname = tempfile.mkstemp(dir=dirname)
+    if tempdir is None:
+        tempdir = os.path.dirname(filename)
+    fd, tempname = tempfile.mkstemp(dir=tempdir)
     os.close(fd)

     f = open(tempname, mode=mode, buffering=buffering, encoding=encoding,
@@ -562,6 +563,9 @@ def save_file_atomic(filename, mode='w', *, buffering=-1, encoding=None,
 #
 # Get the disk usage of a given directory in bytes.
 #
+# This function assumes that files do not inadvertantly
+# disappear while this function is running.
+#
 # Arguments:
 #     (str) The path whose size to check.
 #
doc/source/index.rst
@@ -13,20 +13,14 @@ They begin with a basic introduction to BuildStream, background
 information on basic concepts, and a guide to the BuildStream command line interface.
 Later sections provide detailed information on BuildStream internals.

+If you are looking for information about how to install BuildStream,
+please refer to the `BuildStream website <https://buildstream.build>`_.

 .. toctree::
-   :maxdepth: 1
+   :maxdepth: 2

    main_about
-   main_install
    main_using
    main_core
    CONTRIBUTING

-
-Resources
----------
-* GitLab repository: https://gitlab.com/BuildStream/buildstream
-* Bug Tracking: https://gitlab.com/BuildStream/buildstream/issues
-* Mailing list: https://mail.gnome.org/mailman/listinfo/buildstream-list
-* IRC Channel: irc://irc.gnome.org/#buildstream
doc/source/install_docker.rst (deleted)
@@ -1,45 +0,0 @@
-
-
-.. _docker:
-
-BuildStream inside Docker
--------------------------
-If your system cannot provide the base system requirements for BuildStream, then it is possible to run buildstream within a Docker image.
-
-The BuildStream project provides
-`Docker images <https://hub.docker.com/r/buildstream/buildstream-fedora>`_
-containing BuildStream and its dependencies.
-This gives you an easy way to get started using BuildStream on any Unix-like
-platform where Docker is available, including Mac OS X.
-
-We recommend using the
-`bst-here wrapper script <https://gitlab.com/BuildStream/buildstream/blob/master/contrib/bst-here>`_
-which automates the necessary container setup. You can download it and make
-it executable like this:
-
-.. code:: bash
-
-   mkdir -p ~/.local/bin
-   curl --get https://gitlab.com/BuildStream/buildstream/raw/master/contrib/bst-here > ~/.local/bin/bst-here
-   chmod +x ~/.local/bin/bst-here
-
-Check if ``~/.local/bin`` appears in your PATH environment variable -- if it
-doesn't, you should
-`edit your ~/.profile so that it does <https://stackoverflow.com/questions/14637979/>`_.
-
-Once the script is available in your PATH, you can run ``bst-here`` to open a
-shell session inside a new container based off the latest version of the
-buildstream-fedora Docker image. The current working directory will be mounted
-inside the container at ``/src``.
-
-You can also run individual BuildStream commands as ``bst-here COMMAND``. For
-example: ``bst-here show systems/my-system.bst``. Note that BuildStream won't
-be able to integrate with Bash tab-completion if you invoke it in this way.
-
-Two Docker volumes are set up by the ``bst-here`` script:
-
- * ``buildstream-cache --`` mounted at ``~/.cache/buildstream``
- * ``buildstream-config --`` mounted at ``~/.config/``
-
-These are necessary so that your BuildStream cache and configuration files
-persist between invocations of ``bst-here``.
doc/source/install_linux_distro.rst (deleted)
@@ -1,30 +0,0 @@
-
-
-.. _install_linux_distro:
-
-Installing from distro packages
-===============================
-BuildStream is available on some linux distributions, here are
-some install instructions for the linux distributions which
-have packaged BuildStream.
-
-
-Arch Linux
-----------
-Packages for Arch exist in `AUR <https://wiki.archlinux.org/index.php/Arch_User_Repository#Installing_packages>`_.
-Two different package versions are available:
-
-* Latest release: `buildstream <https://aur.archlinux.org/packages/buildstream>`_
-* Latest development snapshot: `buildstream-git <https://aur.archlinux.org/packages/buildstream-git>`_
-
-
-Fedora
-------
-BuildStream is not yet in the official Fedora repositories, but you can
-install it from a Copr::
-
-   sudo dnf copr enable bochecha/buildstream
-   sudo dnf install buildstream
-
-Optionally, install the ``buildstream-docs`` package to have the BuildStream
-documentation in Devhelp or GNOME Builder.
doc/source/install_source.rst (deleted)
@@ -1,289 +0,0 @@
-
-
-Installing from source
-======================
-Until BuildStream is available in :ref:`your distro <install_linux_distro>`, you will
-need to install it yourself from source.
-
-
-Installing dependencies
------------------------
-Before installing BuildStream from source, it is necessary to first install
-the system dependencies. Below are some linux distribution specific instructions
-for installing these dependencies.
-
-BuildStream requires the following base system requirements:
-
-* python3 >= 3.5
-* bubblewrap >= 0.1.2
-* fuse2
-
-BuildStream also depends on the host tools for the :mod:`Source <buildstream.source>` plugins.
-Refer to the respective :ref:`source plugin <plugins_sources>` documentation for host tool
-requirements of specific plugins.
-
-The default plugins with extra host dependencies are:
-
-* bzr
-* deb
-* git
-* ostree
-* patch
-* pip
-* tar
-
-If you intend to push built artifacts to a remote artifact server,
-which requires special permissions, you will also need:
-
-* ssh
-
-
-Arch Linux
-~~~~~~~~~~
-Install the dependencies with::
-
-   sudo pacman -S \
-       python fuse2 bubblewrap \
-       python-pip
-
-For the default plugins::
-
-   sudo pacman -S \
-       bzr git lzip ostree patch python-gobject
-
-
-The package *python-arpy* is required by the deb source plugin. This is not
-obtainable via `pacman`, you must get *python-arpy* from AUR:
-https://aur.archlinux.org/packages/python-arpy/
-
-To install::
-
-   wget https://aur.archlinux.org/cgit/aur.git/snapshot/python-arpy.tar.gz
-   tar -xvf python-arpy.tar.gz
-   cd python-arpy
-   makepkg -si
-
-
-Debian
-~~~~~~
-Install the dependencies with::
-
-   sudo apt-get install \
-       python3 fuse bubblewrap \
-       python3-pip python3-dev
-
-For the default plugins:
-
-
-Stretch
-+++++++
-With stretch, you first need to ensure that you have the backports repository
-setup as described `here <https://backports.debian.org/Instructions/>`_
-
-By adding the following line to your sources.list::
-
-   deb http://deb.debian.org/debian stretch-backports main
-
-And then running::
-
-   sudo apt update
-
-At this point you should be able to get the system requirements for the default plugins with::
-
-   sudo apt install \
-       bzr git lzip patch python3-arpy python3-gi
-   sudo apt install -t stretch-backports \
-       gir1.2-ostree-1.0 ostree
-
-
-Buster or Sid
-+++++++++++++
-For debian unstable or testing, only the following line should be enough
-to get the system requirements for the default plugins installed::
-
-   sudo apt-get install \
-       lzip gir1.2-ostree-1.0 git bzr ostree patch python3-arpy python3-gi
-
-
-Fedora
-~~~~~~
-For recent fedora systems, the following line should get you the system
-requirements you need::
-
-   dnf install -y \
-       python3 fuse bubblewrap \
-       python3-pip python3-devel
-
-For the default plugins::
-
-   dnf install -y \
-       bzr git lzip patch ostree python3-gobject
-   pip3 install --user arpy
-
-
-Ubuntu
-~~~~~~
-
-
-Ubuntu 18.04 LTS or later
-+++++++++++++++++++++++++
-Install the dependencies with::
-
-   sudo apt install \
-       python3 fuse bubblewrap \
-       python3-pip python3-dev
-
-For the default plugins::
-
-   sudo apt install \
-       bzr gir1.2-ostree-1.0 git lzip ostree patch python3-arpy python3-gi
-
-
-Ubuntu 16.04 LTS
-++++++++++++++++
-On Ubuntu 16.04, neither `bubblewrap <https://github.com/projectatomic/bubblewrap/>`_
-or `ostree <https://github.com/ostreedev/ostree>`_ are available in the official repositories.
-You will need to install them in whichever way you see fit. Refer the the upstream documentation
-for advice on this.
-
-
-Installing
-----------
-Once you have the base system dependencies, you can install the BuildStream
-python package as a regular user.
-
-
-Installing from PyPI (recommended)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Since we only ever publish :ref:`release versions <install_semantic_versioning>` on
-PyPI, it is currently recommended to use this installation path. This will
-ensure that you always have the latest recommended version of BuildStream that
-we recommend.
-
-To install from PyPI, you will additionally require:
-
-* pip for python3 (only required for setup)
-* Python 3 development libraries and headers
-
-Simply run the following command::
-
-   pip3 install --user BuildStream
-
-This will install latest stable version of BuildStream and its pure python
-dependencies into your user's homedir in ``~/.local``.
-
-Keep following the instructions below to ensure that the ``bst``
-command is in your ``PATH`` and to enable bash completions for it.
-
-.. note::
-
-   If you want a specific version of BuildStream, you can install it using
-   ``pip install --user BuildStream==<version-number>``
-
-
-Upgrading from PyPI
-+++++++++++++++++++
-Once you have already installed BuildStream from PyPI, you can later update
-to the latest recommended version like so::
-
-   pip install --user --upgrade BuildStream
-
-
-.. _install_git_checkout:
-
-Installing from a git checkout
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-To install directly from the `git repository <https://gitlab.com/BuildStream/buildstream.git>`_
-using python's ``pip`` package manager, you will additionally require:
-
-* pip for python3 (only required for setup)
-* Python 3 development libraries and headers
-* git (to checkout BuildStream)
-
-Before installing, please check the existing tags in the git repository
-and determine which version you want to install, and whether you want
-to install an official release version (recommended), or a development snapshot
-to help us out testing the bleeding edge of development. Follow the
-:ref:`semantic versioning guide <install_semantic_versioning>` to determine
-which tag you intend to install.
-
-Run the following commands::
-
-   git clone https://gitlab.com/BuildStream/buildstream.git
-   cd buildstream
-   git checkout <desired release tag>
-   pip3 install --user -e .
-
-This will install buildstream's pure python dependencies into
-your user's homedir in ``~/.local`` and will run BuildStream directly
-from the git checkout directory.
-
-Keep following the instructions below to ensure that the ``bst``
-command is in your ``PATH`` and to enable bash completions for it.
-
-.. note::
-
-   We recommend the ``-e`` option because you can upgrade your
-   installation by simply updating the checked out git repository.
-
-   If you want a full installation that is not linked to your
-   git checkout, just omit the ``-e`` option from the above commands.
-
-
-Upgrading from a git checkout
-+++++++++++++++++++++++++++++
-If you installed BuildStream from a local git checkout using ``-e`` option, all
-you need to do to upgrade BuildStream is to update your local git checkout::
-
-   cd /path/to/buildstream
-   git pull --rebase
-
-If you did not specify the ``-e`` option at install time or the dependancies
-have changed, you will need to cleanly reinstall BuildStream::
-
-   pip3 uninstall buildstream
-   cd /path/to/buildstream
-   git pull --rebase
-   pip3 install --user .
-
-.. note::
-
-   If BuildStream has added any dependencies since the last upgrade,
-   you will need to uninstall and reinstall to ensure those dependencies
-   are met, regardless of whether you have used the ``-e`` option at
-   install time.
-
-
-Post install setup
-------------------
-After having installed from source using any of the above methods, some
-setup will be required to use BuildStream.
-
-
-Adjust PATH
-~~~~~~~~~~~
-Since BuildStream is now installed under your local user's install directories,
-you need to ensure that ``PATH`` is adjusted.
-
-A regular way to do this is to add the following line to the end of your ``~/.bashrc``::
-
-   export PATH="${PATH}:${HOME}/.local/bin"
-
-.. note::
-
-   You will have to restart your terminal in order for these changes to take effect.
-
-
-Bash completions
-~~~~~~~~~~~~~~~~
-Bash completions are supported by sourcing the ``buildstream/data/bst``
-script found in the BuildStream repository. On many systems this script
-can be installed into a completions directory but when installing BuildStream
-without a package manager this is not an option.
-
-To enable completions for an installation of BuildStream you
-installed yourself from git, just append the script verbatim
-to your ``~/.bash_completion``:
-
-.. literalinclude:: ../../buildstream/data/bst
-   :language: yaml
doc/source/install_versions.rst (deleted)
@@ -1,24 +0,0 @@
-
-
-.. _install_semantic_versioning:
-
-Semantic Versioning
-===================
-BuildStream follows the Semantic Versioning Convention `(SemVer) <https://semver.org/>`_,
-and uses even minor point numbers to denote releases intended for users while
-odd minor point numbers represent development snapshops.
-
-For example, for a given version number ``X.Y.Z``
- * The ``X.<even number>.*`` versions are releases intended for users.
- * The ``X.<odd number>.*`` versions are development spanshots intended for testing.
-
-If you are :ref:`installing from git <install_git_checkout>`, please look for the latest
-tag to ensure you're getting the latest release.
-
-* Latest release:
-
-  .. include:: release-badge.rst
-
-* Latest development snapshot:
-
-  .. include:: snapshot-badge.rst
doc/source/main_install.rst (deleted)
@@ -1,29 +0,0 @@
-
-
-.. _install:
-
-Install
-=======
-
-.. include:: release-badge.rst
-
-.. include:: snapshot-badge.rst
-
-This section provides instructions for installing BuildStream and its
-companion artifact server on various platforms, along with any installation
-related materials.
-
-.. note::
-
-   BuildStream is currently only supported natively on Linux. Users of Unix-like
-   systems where Docker is available can still use BuildStream by following the
-   :ref:`Docker install guide <docker>`
-
-.. toctree::
-   :maxdepth: 1
-
-   install_source
-   install_linux_distro
-   install_docker
-   install_artifacts
-   install_versions
doc/source/main_using.rst
@@ -15,3 +15,4 @@ guides and information on user preferences and configuration.
    using_examples
    using_config
    using_commands
+   using_configuring_artifact_server
doc/source/release-badge.rst (deleted)
@@ -1,26 +0,0 @@
-
-.. Use this file to include the badge in the documentation, but not in
-   the README.rst or gitlab rendered materials, that doesnt work.
-
-   This is partly a workaround for a sphinx issue, we will be able
-   to avoid the raw html once this is implemented in sphinx:
-
-     https://github.com/sphinx-doc/sphinx/issues/2240
-
-   Using the <object> tag instead of the <img> tag which sphinx generates
-   allows the svg to be "interactive", for us this basically means that
-   the link we encode in the badge svg is used, rather than static urls
-   which need to be used around the <img> tag.
-
-   WARNING: The custom CSS on the style tag will need to change if we
-            change the theme, so that the <object> tag behaves similar
-            to how the <img> tag is themed by the style sheets.
-
-.. raw:: html
-
-   <a class="reference external image-reference">
-     <object style="margin-bottom:24px;vertical-align:middle"
-             data=""
-             type="image/svg+xml"/>
-     </object>
-   </a>
doc/source/snapshot-badge.rst (deleted)
@@ -1,26 +0,0 @@
-
-.. Use this file to include the badge in the documentation, but not in
-   the README.rst or gitlab rendered materials, that doesnt work.
-
-   This is partly a workaround for a sphinx issue, we will be able
-   to avoid the raw html once this is implemented in sphinx:
-
-     https://github.com/sphinx-doc/sphinx/issues/2240
-
-   Using the <object> tag instead of the <img> tag which sphinx generates
-   allows the svg to be "interactive", for us this basically means that
-   the link we encode in the badge svg is used, rather than static urls
-   which need to be used around the <img> tag.
-
-   WARNING: The custom CSS on the style tag will need to change if we
-            change the theme, so that the <object> tag behaves similar
-            to how the <img> tag is themed by the style sheets.
-
-.. raw:: html
-
-   <a class="reference external image-reference">
-     <object style="margin-bottom:24px;vertical-align:middle"
-             data=""
-             type="image/svg+xml"/>
-     </object>
-   </a>
doc/source/install_artifacts.rst → doc/source/using_configuring_artifact_server.rst
@@ -2,8 +2,8 @@

 .. _artifacts:

-Installing an artifact server
-=============================
+Configuring Artifact Server
+===========================
 BuildStream caches the results of builds in a local artifact cache, and will
 avoid building an element if there is a suitable build already present in the
 local artifact cache.
@@ -72,7 +72,7 @@ Installing the server
 ~~~~~~~~~~~~~~~~~~~~~
 You will also need to install BuildStream on the artifact server in order
 to receive uploaded artifacts over ssh. Follow the instructions for installing
-BuildStream :ref:`here <install>`
+BuildStream `here <https://buildstream.build/install.html>`_.

 When installing BuildStream on the artifact server, it must be installed
 in a system wide location, with ``pip3 install .`` in the BuildStream
139 | 139 |
result.assert_success()
|
140 | 140 |
|
141 | 141 |
|
142 |
+@pytest.mark.datafiles(DATA_DIR)
|
|
143 |
+@pytest.mark.parametrize("ref_storage", [("inline"), ("project.refs")])
|
|
144 |
+@pytest.mark.parametrize("mirror", [("no-mirror"), ("mirror"), ("unrelated-mirror")])
|
|
145 |
+def test_mirror_fetch_ref_storage(cli, tmpdir, datafiles, ref_storage, mirror):
|
|
146 |
+ bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr')
|
|
147 |
+ dev_files_path = os.path.join(str(datafiles), 'files', 'dev-files', 'usr')
|
|
148 |
+ upstream_repodir = os.path.join(str(tmpdir), 'upstream')
|
|
149 |
+ mirror_repodir = os.path.join(str(tmpdir), 'mirror')
|
|
150 |
+ project_dir = os.path.join(str(tmpdir), 'project')
|
|
151 |
+ os.makedirs(project_dir)
|
|
152 |
+ element_dir = os.path.join(project_dir, 'elements')
|
|
153 |
+ |
|
154 |
+ # Create repo objects of the upstream and mirror
|
|
155 |
+ upstream_repo = create_repo('tar', upstream_repodir)
|
|
156 |
+ upstream_ref = upstream_repo.create(bin_files_path)
|
|
157 |
+ mirror_repo = upstream_repo.copy(mirror_repodir)
|
|
158 |
+ mirror_ref = upstream_ref
|
|
159 |
+ upstream_ref = upstream_repo.create(dev_files_path)
|
|
160 |
+ |
|
161 |
+ element = {
|
|
162 |
+ 'kind': 'import',
|
|
163 |
+ 'sources': [
|
|
164 |
+ upstream_repo.source_config(ref=upstream_ref if ref_storage == 'inline' else None)
|
|
165 |
+ ]
|
|
166 |
+ }
|
|
167 |
+ element_name = 'test.bst'
|
|
168 |
+ element_path = os.path.join(element_dir, element_name)
|
|
169 |
+ full_repo = element['sources'][0]['url']
|
|
170 |
+ upstream_map, repo_name = os.path.split(full_repo)
|
|
171 |
+ alias = 'foo'
|
|
172 |
+ aliased_repo = alias + ':' + repo_name
|
|
173 |
+ element['sources'][0]['url'] = aliased_repo
|
|
174 |
+ full_mirror = mirror_repo.source_config()['url']
|
|
175 |
+ mirror_map, _ = os.path.split(full_mirror)
|
|
176 |
+ os.makedirs(element_dir)
|
|
177 |
+ _yaml.dump(element, element_path)
|
|
178 |
+ |
|
179 |
+ if ref_storage == 'project.refs':
|
|
180 |
+ # Manually set project.refs to avoid caching the repo prematurely
|
|
181 |
+ project_refs = {'projects': {
|
|
182 |
+ 'test': {
|
|
183 |
+ element_name: [
|
|
184 |
+ {'ref': upstream_ref}
|
|
185 |
+ ]
|
|
186 |
+ }
|
|
187 |
+ }}
|
|
188 |
+ project_refs_path = os.path.join(project_dir, 'project.refs')
|
|
189 |
+ _yaml.dump(project_refs, project_refs_path)
|
|
190 |
+ |
|
191 |
+ project = {
|
|
192 |
+ 'name': 'test',
|
|
193 |
+ 'element-path': 'elements',
|
|
194 |
+ 'aliases': {
|
|
195 |
+ alias: upstream_map + "/"
|
|
196 |
+ },
|
|
197 |
+ 'ref-storage': ref_storage
|
|
198 |
+ }
|
|
199 |
+ if mirror != 'no-mirror':
|
|
200 |
+ mirror_data = [{
|
|
201 |
+ 'name': 'middle-earth',
|
|
202 |
+ 'aliases': {alias: [mirror_map + '/']}
|
|
203 |
+ }]
|
|
204 |
+ if mirror == 'unrelated-mirror':
|
|
205 |
+ mirror_data.insert(0, {
|
|
206 |
+ 'name': 'narnia',
|
|
207 |
+ 'aliases': {'frob': ['http://www.example.com/repo']}
|
|
208 |
+ })
|
|
209 |
+ project['mirrors'] = mirror_data
|
|
210 |
+ |
|
211 |
+ project_file = os.path.join(project_dir, 'project.conf')
|
|
212 |
+ _yaml.dump(project, project_file)
|
|
213 |
+ |
|
214 |
+ result = cli.run(project=project_dir, args=['fetch', element_name])
|
|
215 |
+ result.assert_success()
|
|
216 |
+ |
|
217 |
+ |
|
142 | 218 |
@pytest.mark.datafiles(DATA_DIR)
|
143 | 219 |
@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
|
144 | 220 |
def test_mirror_fetch_upstream_absent(cli, tmpdir, datafiles, kind):
|