Tom Pollard pushed to branch tpollard/494 at BuildStream / buildstream
Commits:
- 900e8900 by Jürg Billeter at 2018-09-27T13:12:34Z
- 107269c1 by Jürg Billeter at 2018-09-27T13:12:34Z
- 8060ad8c by Jürg Billeter at 2018-09-27T13:12:34Z
- ab1cb672 by Jürg Billeter at 2018-09-27T13:46:41Z
- fcf37f9c by knownexus at 2018-09-27T14:22:09Z
- 80ebdd2c by knownexus at 2018-09-27T14:22:09Z
- 5e81573b by knownexus at 2018-09-27T14:22:09Z
- 3c93fe97 by knownexus at 2018-09-27T14:22:09Z
- 0f3ef369 by knownexus at 2018-09-27T14:22:24Z
- b78ae767 by knownexus at 2018-09-27T14:29:29Z
- ab5e78b4 by James Ennis at 2018-09-27T14:29:32Z
- 261f65ca by Jürg Billeter at 2018-09-27T14:54:47Z
- d2ea3069 by Richard Maw at 2018-09-27T15:16:57Z
- c128429a by richardmaw-codethink at 2018-09-27T15:42:18Z
- e91eb38b by Valentin David at 2018-09-27T16:32:57Z
- 7babd55c by Tiago Gomes at 2018-09-27T16:57:12Z
- 5cdfac81 by Mathieu Bridon at 2018-09-28T09:20:20Z
- 5d3f039f by Tom Pollard at 2018-09-28T10:34:13Z
- 14e1a3b3 by Jürg Billeter at 2018-10-01T08:18:05Z
- 232662f1 by Jürg Billeter at 2018-10-01T08:53:06Z
- 76e07b54 by Tom Pollard at 2018-10-01T10:24:33Z
22 changed files:
- buildstream/_artifactcache/artifactcache.py
- buildstream/_artifactcache/cascache.py
- buildstream/_frontend/app.py
- buildstream/_frontend/cli.py
- + buildstream/_platform/darwin.py
- buildstream/_platform/linux.py
- buildstream/_platform/platform.py
- buildstream/_platform/unix.py
- buildstream/_project.py
- buildstream/_scheduler/queues/pullqueue.py
- buildstream/_stream.py
- buildstream/_yaml.py
- buildstream/data/userconfig.yaml
- buildstream/element.py
- buildstream/plugins/sources/git.py
- buildstream/sandbox/__init__.py
- + buildstream/sandbox/_sandboxdummy.py
- buildstream/utils.py
- tests/format/project.py
- tests/integration/cachedfail.py
- tests/sources/git.py
- tests/testutils/repo/git.py
Changes:
buildstream/_artifactcache/artifactcache.py:

@@ -38,8 +38,9 @@ CACHE_SIZE_FILE = "cache_size"
 #     url (str): Location of the remote artifact cache
 #     push (bool): Whether we should attempt to push artifacts to this cache,
 #                  in addition to pulling from it.
+#     buildtrees (bool): Whether the default action of pull should include the artifact buildtree
 #
-class ArtifactCacheSpec(namedtuple('ArtifactCacheSpec', 'url push server_cert client_key client_cert')):
+class ArtifactCacheSpec(namedtuple('ArtifactCacheSpec', 'url push buildtrees server_cert client_key client_cert')):
 
     # _new_from_config_node
     #
@@ -47,9 +48,10 @@ class ArtifactCacheSpec(namedtuple('ArtifactCacheSpec', 'url push server_cert cl
     #
     @staticmethod
     def _new_from_config_node(spec_node, basedir=None):
-        _yaml.node_validate(spec_node, ['url', 'push', 'server-cert', 'client-key', 'client-cert'])
+        _yaml.node_validate(spec_node, ['url', 'push', 'pullbuildtrees', 'server-cert', 'client-key', 'client-cert'])
         url = _yaml.node_get(spec_node, str, 'url')
         push = _yaml.node_get(spec_node, bool, 'push', default_value=False)
+        buildtrees = _yaml.node_get(spec_node, bool, 'pullbuildtrees', default_value=False)
         if not url:
             provenance = _yaml.node_get_provenance(spec_node, 'url')
             raise LoadError(LoadErrorReason.INVALID_DATA,
@@ -77,7 +79,7 @@ class ArtifactCacheSpec(namedtuple('ArtifactCacheSpec', 'url push server_cert cl
             raise LoadError(LoadErrorReason.INVALID_DATA,
                             "{}: 'client-cert' was specified without 'client-key'".format(provenance))
 
-        return ArtifactCacheSpec(url, push, server_cert, client_key, client_cert)
+        return ArtifactCacheSpec(url, push, buildtrees, server_cert, client_key, client_cert)
 
 
 ArtifactCacheSpec.__new__.__defaults__ = (None, None, None)
@@ -426,6 +428,22 @@ class ArtifactCache():
         raise ImplError("Cache '{kind}' does not implement contains()"
                         .format(kind=type(self).__name__))
 
+    # contains_subdir_artifact():
+    #
+    # Check whether an artifact element contains a digest for a subdir
+    # which is populated in the cache, i.e non dangling.
+    #
+    # Args:
+    #     element (Element): The Element to check
+    #     key (str): The cache key to use
+    #     subdir (str): The subdir to check
+    #
+    # Returns: True if the subdir exists & is populated in the cache, False otherwise
+    #
+    def contains_subdir_artifact(self, element, key, subdir):
+        raise ImplError("Cache '{kind}' does not implement contains_subdir_artifact()"
+                        .format(kind=type(self).__name__))
+
     # list_artifacts():
     #
     # List artifacts in this cache in LRU order.
@@ -551,11 +569,12 @@ class ArtifactCache():
     #     element (Element): The Element whose artifact is to be fetched
     #     key (str): The cache key to use
     #     progress (callable): The progress callback, if any
+    #     subdir (str): The optional specific subdir to pull
     #
     # Returns:
     #   (bool): True if pull was successful, False if artifact was not available
     #
-    def pull(self, element, key, *, progress=None):
+    def pull(self, element, key, *, progress=None, subdir=None, excluded_subdirs=None):
         raise ImplError("Cache '{kind}' does not implement pull()"
                         .format(kind=type(self).__name__))

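[Editor's note] The new buildtrees field rides along on the existing ArtifactCacheSpec namedtuple and is parsed from an optional 'pullbuildtrees' key on a remote cache entry. A minimal standalone sketch of the resulting spec (not part of the MR; the URL is a placeholder):

from collections import namedtuple

# Mirror of the extended spec: 'buildtrees' sits between 'push' and the TLS fields,
# and the three TLS fields keep their None defaults.
ArtifactCacheSpec = namedtuple(
    'ArtifactCacheSpec', 'url push buildtrees server_cert client_key client_cert')
ArtifactCacheSpec.__new__.__defaults__ = (None, None, None)

# A pull-only remote that should also deliver buildtrees by default.
spec = ArtifactCacheSpec('https://cache.example.com:11001', False, True)
assert spec.buildtrees and not spec.push
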
buildstream/_artifactcache/cascache.py:

@@ -67,7 +67,6 @@ class CASCache(ArtifactCache):
         self.casdir = os.path.join(context.artifactdir, 'cas')
         os.makedirs(os.path.join(self.casdir, 'refs', 'heads'), exist_ok=True)
         os.makedirs(os.path.join(self.casdir, 'objects'), exist_ok=True)
-
         self._calculate_cache_quota()
 
         # Per-project list of _CASRemote instances.
@@ -92,6 +91,16 @@ class CASCache(ArtifactCache):
         # This assumes that the repository doesn't have any dangling pointers
         return os.path.exists(refpath)
 
+    def contains_subdir_artifact(self, element, key, subdir):
+        tree = self.resolve_ref(self.get_artifact_fullname(element, key))
+
+        # This assumes that the subdir digest is present in the element tree
+        subdirdigest = self._get_subdir(tree, subdir)
+        objpath = self.objpath(subdirdigest)
+
+        # True if subdir content is cached or if empty as expected
+        return os.path.exists(objpath)
+
     def extract(self, element, key):
         ref = self.get_artifact_fullname(element, key)
 
@@ -228,7 +237,7 @@ class CASCache(ArtifactCache):
         remotes_for_project = self._remotes[element._get_project()]
         return any(remote.spec.push for remote in remotes_for_project)
 
-    def pull(self, element, key, *, progress=None):
+    def pull(self, element, key, *, progress=None, subdir=None, excluded_subdirs=None):
         ref = self.get_artifact_fullname(element, key)
 
         project = element._get_project()
@@ -247,8 +256,14 @@ class CASCache(ArtifactCache):
                 tree.hash = response.digest.hash
                 tree.size_bytes = response.digest.size_bytes
 
-                self._fetch_directory(remote, tree)
+                # Check if the element artifact is present, if so just fetch subdir
+                if subdir and os.path.exists(self.objpath(tree)):
+                    self._fetch_subdir(remote, tree, subdir)
+                else:
+                    # Fetch artifact, excluded_subdirs determined in pullqueue
+                    self._fetch_directory(remote, tree, excluded_subdirs=excluded_subdirs)
 
+                # tree is the remote value, so is the same without or without dangling ref locally
                 self.set_ref(ref, tree)
 
                 element.info("Pulled artifact {} <- {}".format(display_key, remote.spec.url))
@@ -649,7 +664,6 @@ class CASCache(ArtifactCache):
     ################################################
     #             Local Private Methods            #
     ################################################
-
     def _checkout(self, dest, tree):
         os.makedirs(dest, exist_ok=True)
 
@@ -668,8 +682,10 @@ class CASCache(ArtifactCache):
                          stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
 
         for dirnode in directory.directories:
-            fullpath = os.path.join(dest, dirnode.name)
-            self._checkout(fullpath, dirnode.digest)
+            # Don't try to checkout a dangling ref
+            if os.path.exists(self.objpath(dirnode.digest)):
+                fullpath = os.path.join(dest, dirnode.name)
+                self._checkout(fullpath, dirnode.digest)
 
         for symlinknode in directory.symlinks:
             # symlink
@@ -948,10 +964,12 @@ class CASCache(ArtifactCache):
     #     remote (Remote): The remote to use.
    #     dir_digest (Digest): Digest object for the directory to fetch.
     #
-    def _fetch_directory(self, remote, dir_digest):
+    def _fetch_directory(self, remote, dir_digest, *, excluded_subdirs=None):
         fetch_queue = [dir_digest]
         fetch_next_queue = []
         batch = _CASBatchRead(remote)
+        if not excluded_subdirs:
+            excluded_subdirs = []
 
         while len(fetch_queue) + len(fetch_next_queue) > 0:
             if len(fetch_queue) == 0:
@@ -966,8 +984,9 @@ class CASCache(ArtifactCache):
                 directory.ParseFromString(f.read())
 
             for dirnode in directory.directories:
-                batch = self._fetch_directory_node(remote, dirnode.digest, batch,
-                                                   fetch_queue, fetch_next_queue, recursive=True)
+                if dirnode.name not in excluded_subdirs:
+                    batch = self._fetch_directory_node(remote, dirnode.digest, batch,
+                                                       fetch_queue, fetch_next_queue, recursive=True)
 
             for filenode in directory.files:
                 batch = self._fetch_directory_node(remote, filenode.digest, batch,
@@ -976,6 +995,12 @@ class CASCache(ArtifactCache):
         # Fetch final batch
         self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
 
+
+    def _fetch_subdir(self, remote, tree, subdir):
+        subdirdigest = self._get_subdir(tree, subdir)
+        self._fetch_directory(remote, subdirdigest)
+
+
     def _fetch_tree(self, remote, digest):
         # download but do not store the Tree object
         with tempfile.NamedTemporaryFile(dir=self.tmpdir) as out:

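[Editor's note] The fetch path now skips any directory node whose name is on the exclusion list while it walks the remote tree. A throwaway sketch of the same filtering idea over a plain filesystem tree (illustrative only, not CAS-aware; paths are made up):

import os

# Skip any directory whose name is in the exclusion list, mirroring how
# _fetch_directory() skips excluded dirnode names such as 'buildtree'.
def walk_excluding(root, *, excluded_subdirs=None):
    if not excluded_subdirs:
        excluded_subdirs = []
    for name in sorted(os.listdir(root)):
        if name in excluded_subdirs:
            continue
        path = os.path.join(root, name)
        yield path
        if os.path.isdir(path) and not os.path.islink(path):
            yield from walk_excluding(path, excluded_subdirs=excluded_subdirs)

# e.g. list an artifact checkout without descending into its buildtree:
# for p in walk_excluding('/path/to/checkout', excluded_subdirs=['buildtree']):
#     print(p)
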
buildstream/_frontend/app.py:

@@ -115,14 +115,6 @@ class App():
         else:
             self.colors = False
 
-        # Increase the soft limit for open file descriptors to the maximum.
-        # SafeHardlinks FUSE needs to hold file descriptors for all processes in the sandbox.
-        # Avoid hitting the limit too quickly.
-        limits = resource.getrlimit(resource.RLIMIT_NOFILE)
-        if limits[0] != limits[1]:
-            # Set soft limit to hard limit
-            resource.setrlimit(resource.RLIMIT_NOFILE, (limits[1], limits[1]))
-
     # create()
     #
     # Should be used instead of the regular constructor.

buildstream/_frontend/cli.py:

@@ -305,10 +305,12 @@ def init(app, project_name, format_version, element_path, force):
               help="Allow tracking to cross junction boundaries")
 @click.option('--track-save', default=False, is_flag=True,
               help="Deprecated: This is ignored")
+@click.option('--pull-buildtrees', default=False, is_flag=True,
+              help="Pull buildtrees from a remote cache server")
 @click.argument('elements', nargs=-1,
                 type=click.Path(readable=False))
 @click.pass_obj
-def build(app, elements, all_, track_, track_save, track_all, track_except, track_cross_junctions):
+def build(app, elements, all_, track_, track_save, track_all, track_except, track_cross_junctions, pull_buildtrees):
     """Build elements in a pipeline"""
 
     if (track_except or track_cross_junctions) and not (track_ or track_all):
@@ -327,7 +329,8 @@ def build(app, elements, all_, track_, track_save, track_all, track_except, trac
                          track_targets=track_,
                          track_except=track_except,
                          track_cross_junctions=track_cross_junctions,
-                         build_all=all_)
+                         build_all=all_,
+                         pull_buildtrees=pull_buildtrees)
 
 
 ##################################################################
@@ -429,10 +432,12 @@ def track(app, elements, deps, except_, cross_junctions):
               help='The dependency artifacts to pull (default: none)')
 @click.option('--remote', '-r',
               help="The URL of the remote cache (defaults to the first configured cache)")
+@click.option('--pull-buildtrees', default=False, is_flag=True,
+              help="Pull buildtrees from a remote cache server")
 @click.argument('elements', nargs=-1,
                 type=click.Path(readable=False))
 @click.pass_obj
-def pull(app, elements, deps, remote):
+def pull(app, elements, deps, remote, pull_buildtrees):
     """Pull a built artifact from the configured remote artifact cache.
 
     By default the artifact will be pulled one of the configured caches
@@ -446,7 +451,7 @@ def pull(app, elements, deps, remote):
         all: All dependencies
     """
     with app.initialized(session_name="Pull"):
-        app.stream.pull(elements, selection=deps, remote=remote)
+        app.stream.pull(elements, selection=deps, remote=remote, pull_buildtrees=pull_buildtrees)
 
 
 ##################################################################

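[Editor's note] Taken together with the artifactcache.py changes above, buildtrees can now be requested either per invocation, for example `bst build --pull-buildtrees target.bst` or `bst pull --pull-buildtrees target.bst` (where target.bst is just a placeholder element name), or by default, by setting `pullbuildtrees: true` on a remote cache entry in the user configuration.
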
buildstream/_platform/darwin.py (new file):

+#
+# Copyright (C) 2017 Codethink Limited
+# Copyright (C) 2018 Bloomberg Finance LP
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import resource
+
+from .._exceptions import PlatformError
+from ..sandbox import SandboxDummy
+
+from . import Platform
+
+
+class Darwin(Platform):
+
+    # This value comes from OPEN_MAX in syslimits.h
+    OPEN_MAX = 10240
+
+    def __init__(self):
+
+        super().__init__()
+
+    def create_sandbox(self, *args, **kwargs):
+        return SandboxDummy(*args, **kwargs)
+
+    def check_sandbox_config(self, config):
+        # Accept all sandbox configs as it's irrelevant with the dummy sandbox (no Sandbox.run).
+        return True
+
+    def get_cpu_count(self, cap=None):
+        cpu_count = os.cpu_count()
+        if cap is None:
+            return cpu_count
+        else:
+            return min(cpu_count, cap)
+
+    def set_resource_limits(self, soft_limit=OPEN_MAX, hard_limit=None):
+        super().set_resource_limits(soft_limit)

buildstream/_platform/linux.py:

@@ -23,7 +23,7 @@ import subprocess
 from .. import _site
 from .. import utils
 from .._message import Message, MessageType
-from ..sandbox import SandboxBwrap
+from ..sandbox import SandboxDummy
 
 from . import Platform
 
@@ -38,13 +38,21 @@ class Linux(Platform):
         self._gid = os.getegid()
 
         self._die_with_parent_available = _site.check_bwrap_version(0, 1, 8)
-        self._user_ns_available = self._check_user_ns_available()
+
+        if self._local_sandbox_available():
+            self._user_ns_available = self._check_user_ns_available()
+        else:
+            self._user_ns_available = False
 
     def create_sandbox(self, *args, **kwargs):
-        # Inform the bubblewrap sandbox as to whether it can use user namespaces or not
-        kwargs['user_ns_available'] = self._user_ns_available
-        kwargs['die_with_parent_available'] = self._die_with_parent_available
-        return SandboxBwrap(*args, **kwargs)
+        if not self._local_sandbox_available():
+            return SandboxDummy(*args, **kwargs)
+        else:
+            from ..sandbox._sandboxbwrap import SandboxBwrap
+            # Inform the bubblewrap sandbox as to whether it can use user namespaces or not
+            kwargs['user_ns_available'] = self._user_ns_available
+            kwargs['die_with_parent_available'] = self._die_with_parent_available
+            return SandboxBwrap(*args, **kwargs)
 
     def check_sandbox_config(self, config):
         if self._user_ns_available:
@@ -58,8 +66,13 @@ class Linux(Platform):
     ################################################
     #              Private Methods                 #
     ################################################
-    def _check_user_ns_available(self):
+    def _local_sandbox_available(self):
+        try:
+            return os.path.exists(utils.get_host_tool('bwrap')) and os.path.exists('/dev/fuse')
+        except utils.ProgramNotFoundError:
+            return False
 
+    def _check_user_ns_available(self):
         # Here, lets check if bwrap is able to create user namespaces,
         # issue a warning if it's not available, and save the state
         # locally so that we can inform the sandbox to not try it

buildstream/_platform/platform.py:

@@ -19,6 +19,7 @@
 
 import os
 import sys
+import resource
 
 from .._exceptions import PlatformError, ImplError
 
@@ -32,23 +33,26 @@ class Platform():
     # sandbox factory as well as platform helpers.
     #
     def __init__(self):
-        pass
+        self.set_resource_limits()
 
     @classmethod
    def _create_instance(cls):
-        if sys.platform.startswith('linux'):
-            backend = 'linux'
-        else:
-            backend = 'unix'
-
         # Meant for testing purposes and therefore hidden in the
         # deepest corners of the source code. Try not to abuse this,
         # please?
         if os.getenv('BST_FORCE_BACKEND'):
             backend = os.getenv('BST_FORCE_BACKEND')
+        elif sys.platform.startswith('linux'):
+            backend = 'linux'
+        elif sys.platform.startswith('darwin'):
+            backend = 'darwin'
+        else:
+            backend = 'unix'
 
         if backend == 'linux':
             from .linux import Linux as PlatformImpl
+        elif backend == 'darwin':
+            from .darwin import Darwin as PlatformImpl
         elif backend == 'unix':
             from .unix import Unix as PlatformImpl
         else:
@@ -62,6 +66,13 @@ class Platform():
             cls._create_instance()
         return cls._instance
 
+    def get_cpu_count(self, cap=None):
+        cpu_count = len(os.sched_getaffinity(0))
+        if cap is None:
+            return cpu_count
+        else:
+            return min(cpu_count, cap)
+
     ##################################################################
     #                      Sandbox functions                        #
     ##################################################################
@@ -84,3 +95,15 @@ class Platform():
     def check_sandbox_config(self, config):
         raise ImplError("Platform {platform} does not implement check_sandbox_config()"
                         .format(platform=type(self).__name__))
+
+    def set_resource_limits(self, soft_limit=None, hard_limit=None):
+        # Need to set resources for _frontend/app.py as this is dependent on the platform
+        # SafeHardlinks FUSE needs to hold file descriptors for all processes in the sandbox.
+        # Avoid hitting the limit too quickly.
+        limits = resource.getrlimit(resource.RLIMIT_NOFILE)
+        if limits[0] != limits[1]:
+            if soft_limit is None:
+                soft_limit = limits[1]
+            if hard_limit is None:
+                hard_limit = limits[1]
+            resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))

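[Editor's note] The file-descriptor bump that used to live in App.__init__ is now a platform hook, with Darwin passing a soft limit capped at OPEN_MAX (10240) rather than the hard limit. A standalone sketch of the same getrlimit/setrlimit pattern, assuming a Unix host (printed values are host-specific):

import resource

# Query the current soft/hard limits for open file descriptors and raise the
# soft limit toward the hard limit, as Platform.set_resource_limits() does.
# On macOS the soft limit cannot exceed OPEN_MAX, which is why the Darwin
# platform passes a capped soft_limit instead of the hard limit.
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
print('before:', soft, hard)
if soft != hard:
    resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
print('after:', resource.getrlimit(resource.RLIMIT_NOFILE))
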
buildstream/_platform/unix.py:

@@ -20,7 +20,6 @@
 import os
 
 from .._exceptions import PlatformError
-from ..sandbox import SandboxChroot
 
 from . import Platform
 
@@ -39,6 +38,7 @@ class Unix(Platform):
             raise PlatformError("Root privileges are required to run without bubblewrap.")
 
     def create_sandbox(self, *args, **kwargs):
+        from ..sandbox._sandboxchroot import SandboxChroot
         return SandboxChroot(*args, **kwargs)
 
     def check_sandbox_config(self, config):

buildstream/_project.py:

@@ -38,6 +38,7 @@ from ._loader import Loader
 from .element import Element
 from ._message import Message, MessageType
 from ._includes import Includes
+from ._platform import Platform
 
 
 # Project Configuration file
@@ -617,7 +618,8 @@ class Project():
         # Based on some testing (mainly on AWS), maximum effective
         # max-jobs value seems to be around 8-10 if we have enough cores
         # users should set values based on workload and build infrastructure
-        output.base_variables['max-jobs'] = str(min(len(os.sched_getaffinity(0)), 8))
+        platform = Platform.get_platform()
+        output.base_variables['max-jobs'] = str(platform.get_cpu_count(8))
 
         # Export options into variables, if that was requested
         output.options.export_variables(output.base_variables)

buildstream/_scheduler/queues/pullqueue.py:

@@ -32,9 +32,20 @@ class PullQueue(Queue):
     complete_name = "Pulled"
     resources = [ResourceType.DOWNLOAD, ResourceType.CACHE]
 
+    def __init__(self, scheduler, buildtrees=False):
+        super().__init__(scheduler)
+
+        # Current default exclusions on pull
+        self._excluded_subdirs = ["buildtree"]
+        self._subdir = None
+        # If buildtrees are to be pulled, remove the value from exclusion list
+        if buildtrees:
+            self._subdir = "buildtree"
+            self._excluded_subdirs.remove(self._subdir)
+
     def process(self, element):
         # returns whether an artifact was downloaded or not
-        if not element._pull():
+        if not element._pull(subdir=self._subdir, excluded_subdirs=self._excluded_subdirs):
             raise SkipJob(self.action_name)
 
     def status(self, element):
@@ -49,7 +60,7 @@ class PullQueue(Queue):
         if not element._can_query_cache():
             return QueueStatus.WAIT
 
-        if element._pull_pending():
+        if element._pull_pending(subdir=self._subdir):
             return QueueStatus.READY
         else:
             return QueueStatus.SKIP

buildstream/_stream.py:

@@ -160,12 +160,14 @@ class Stream():
     #    track_cross_junctions (bool): Whether tracking should cross junction boundaries
     #    build_all (bool): Whether to build all elements, or only those
     #                      which are required to build the target.
+    #    pull_buildtrees (bool): Whether to pull buildtrees from a remote cache server
     #
     def build(self, targets, *,
               track_targets=None,
               track_except=None,
               track_cross_junctions=False,
-              build_all=False):
+              build_all=False,
+              pull_buildtrees=False):
 
         if build_all:
             selection = PipelineSelection.ALL
@@ -195,7 +197,11 @@ class Stream():
             self._add_queue(track_queue, track=True)
 
         if self._artifacts.has_fetch_remotes():
-            self._add_queue(PullQueue(self._scheduler))
+            # Query if any of the user defined artifact servers have buildtrees set
+            for cache in self._context.artifact_cache_specs:
+                if cache.buildtrees:
+                    pull_buildtrees = True
+            self._add_queue(PullQueue(self._scheduler, buildtrees=pull_buildtrees))
 
         self._add_queue(FetchQueue(self._scheduler, skip_cached=True))
         self._add_queue(BuildQueue(self._scheduler))
@@ -295,7 +301,8 @@ class Stream():
     #
     def pull(self, targets, *,
              selection=PipelineSelection.NONE,
-             remote=None):
+             remote=None,
+             pull_buildtrees=False):
 
         use_config = True
         if remote:
@@ -310,8 +317,13 @@ class Stream():
         if not self._artifacts.has_fetch_remotes():
             raise StreamError("No artifact caches available for pulling artifacts")
 
+        # Query if any of the user defined artifact servers have buildtrees set
+        for cache in self._context.artifact_cache_specs:
+            if cache.buildtrees:
+                pull_buildtrees = True
+
         self._pipeline.assert_consistent(elements)
-        self._add_queue(PullQueue(self._scheduler))
+        self._add_queue(PullQueue(self._scheduler, buildtrees=pull_buildtrees))
         self._enqueue_plan(elements)
         self._run()

buildstream/_yaml.py:

@@ -467,7 +467,7 @@ def node_get_project_path(node, key, project_dir, *,
                         "{}: Specified path '{}' does not exist"
                         .format(provenance, path_str))
 
-    is_inside = project_dir_path in full_resolved_path.parents or (
+    is_inside = project_dir_path.resolve() in full_resolved_path.parents or (
         full_resolved_path == project_dir_path)
 
     if path.is_absolute() or not is_inside:

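[Editor's note] The added .resolve() matters when the project directory is itself reached through a symlink, which is the case the new test in tests/format/project.py further down exercises. A self-contained pathlib sketch of the failure mode, using throwaway temporary paths:

import os
import pathlib
import tempfile

# A project dir reached via a symlink compares unequal to the fully-resolved
# paths of files beneath it unless the project dir is resolved first.
real = pathlib.Path(tempfile.mkdtemp())
(real / 'elements').mkdir()
link = pathlib.Path(tempfile.mkdtemp()) / 'linked'
os.symlink(real, link)

child = link / 'elements' / 'element.bst'
resolved_parent = child.parent.resolve()  # resolves through the symlink to 'real'

assert link not in resolved_parent.parents          # unresolved project dir: looks "outside"
assert link.resolve() in resolved_parent.parents    # resolved project dir: correctly "inside"
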
buildstream/data/userconfig.yaml:

@@ -26,8 +26,13 @@ logdir: ${XDG_CACHE_HOME}/buildstream/logs
 # Cache
 #
 cache:
-  # Size of the artifact cache - BuildStream will attempt to keep the
+  # Size of the artifact cache in bytes - BuildStream will attempt to keep the
   # artifact cache within this size.
+  # If the value is suffixed with K, M, G or T, the specified memory size is
+  # parsed as Kilobytes, Megabytes, Gigabytes, or Terabytes (with the base
+  # 1024), respectively.
+  # Alternatively, a percentage value may be specified, which is taken relative
+  # to the isize of the file system containing the cache.
   quota: infinity
 
 #

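[Editor's note] The documented suffixes are binary multiples, so a quota such as 4G works out to 4 * 1024^3 bytes. A throwaway sketch of that arithmetic (illustrative only, not BuildStream's actual parser):

# Convert a suffixed quota string to bytes using base 1024, matching the
# K/M/G/T convention documented above.
def quota_to_bytes(value):
    multipliers = {'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3, 'T': 1024 ** 4}
    if value[-1] in multipliers:
        return int(value[:-1]) * multipliers[value[-1]]
    return int(value)

assert quota_to_bytes('4G') == 4 * 1024 ** 3
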
buildstream/element.py:

@@ -1689,18 +1689,26 @@ class Element(Plugin):
 
     # _pull_pending()
     #
-    # Check whether the artifact will be pulled.
+    # Check whether the artifact will be pulled. If the pull operation is to
+    # include a specific subdir of the element artifact (from cli or user conf)
+    # then the local cache is queried for the subdirs existence.
+    #
+    # Args:
+    #     subdir (str): Whether the pull has been invoked with a specific subdir set
     #
     # Returns:
     #   (bool): Whether a pull operation is pending
     #
-    def _pull_pending(self):
+    def _pull_pending(self, subdir=None):
         if self._get_workspace():
             # Workspace builds are never pushed to artifact servers
             return False
 
-        if self.__strong_cached:
-            # Artifact already in local cache
+        if self.__strong_cached and subdir:
+            # If we've specified a subdir, check if the subdir is cached locally
+            if self.__artifacts.contains_subdir_artifact(self, self.__strict_cache_key, subdir):
+                return False
+        elif self.__strong_cached:
             return False
 
         # Pull is pending if artifact remote server available
@@ -1722,11 +1730,10 @@ class Element(Plugin):
 
         self._update_state()
 
-    def _pull_strong(self, *, progress=None):
+    def _pull_strong(self, *, progress=None, subdir=None, excluded_subdirs=None):
         weak_key = self._get_cache_key(strength=_KeyStrength.WEAK)
-
         key = self.__strict_cache_key
-        if not self.__artifacts.pull(self, key, progress=progress):
+        if not self.__artifacts.pull(self, key, progress=progress, subdir=subdir, excluded_subdirs=excluded_subdirs):
             return False
 
         # update weak ref by pointing it to this newly fetched artifact
@@ -1734,10 +1741,9 @@ class Element(Plugin):
 
         return True
 
-    def _pull_weak(self, *, progress=None):
+    def _pull_weak(self, *, progress=None, subdir=None, excluded_subdirs=None):
         weak_key = self._get_cache_key(strength=_KeyStrength.WEAK)
-
-        if not self.__artifacts.pull(self, weak_key, progress=progress):
+        if not self.__artifacts.pull(self, weak_key, progress=progress, subdir=subdir, excluded_subdirs=excluded_subdirs):
             return False
 
         # extract strong cache key from this newly fetched artifact
@@ -1755,17 +1761,17 @@ class Element(Plugin):
     #
     # Returns: True if the artifact has been downloaded, False otherwise
     #
-    def _pull(self):
+    def _pull(self, subdir=None, excluded_subdirs=None):
         context = self._get_context()
 
         def progress(percent, message):
             self.status(message)
 
         # Attempt to pull artifact without knowing whether it's available
-        pulled = self._pull_strong(progress=progress)
+        pulled = self._pull_strong(progress=progress, subdir=subdir, excluded_subdirs=excluded_subdirs)
 
         if not pulled and not self._cached() and not context.get_strict():
-            pulled = self._pull_weak(progress=progress)
+            pulled = self._pull_weak(progress=progress, subdir=subdir, excluded_subdirs=excluded_subdirs)
 
         if not pulled:
             return False
@@ -1788,10 +1794,14 @@ class Element(Plugin):
         if not self._cached():
             return True
 
-        # Do not push tained artifact
+        # Do not push tainted artifact
         if self.__get_tainted():
             return True
 
+        # Do not push elements that have a dangling buildtree artifact unless element type is
+        # expected to have an empty buildtree directory
+        if not self.__artifacts.contains_subdir_artifact(self, self.__strict_cache_key, 'buildtree'):
+            return True
         return False
 
     # _push():

buildstream/plugins/sources/git.py:

@@ -43,6 +43,12 @@ git - stage files from a git repository
    # will be used to update the 'ref' when refreshing the pipeline.
    track: master
 
+   # Optionally specify the ref format used for tracking.
+   # The default is 'sha1' for the raw commit hash.
+   # If you specify 'git-describe', the commit hash will be prefixed
+   # with the closest tag.
+   ref-format: sha1
+
    # Specify the commit ref, this must be specified in order to
    # checkout sources and build, but can be automatically updated
    # if the 'track' attribute was specified.
@@ -205,7 +211,18 @@ class GitMirror(SourceFetcher):
             [self.source.host_git, 'rev-parse', tracking],
             fail="Unable to find commit for specified branch name '{}'".format(tracking),
             cwd=self.mirror)
-        return output.rstrip('\n')
+        ref = output.rstrip('\n')
+
+        if self.source.ref_format == 'git-describe':
+            # Prefix the ref with the closest tag, if available,
+            # to make the ref human readable
+            exit_code, output = self.source.check_output(
+                [self.source.host_git, 'describe', '--tags', '--abbrev=40', '--long', ref],
+                cwd=self.mirror)
+            if exit_code == 0:
+                ref = output.rstrip('\n')
+
+        return ref
 
     def stage(self, directory, track=None):
         fullpath = os.path.join(directory, self.path)
@@ -341,13 +358,18 @@ class GitSource(Source):
     def configure(self, node):
         ref = self.node_get_member(node, str, 'ref', None)
 
-        config_keys = ['url', 'track', 'ref', 'submodules', 'checkout-submodules']
+        config_keys = ['url', 'track', 'ref', 'submodules', 'checkout-submodules', 'ref-format']
         self.node_validate(node, config_keys + Source.COMMON_CONFIG_KEYS)
 
         self.original_url = self.node_get_member(node, str, 'url')
         self.mirror = GitMirror(self, '', self.original_url, ref, primary=True)
         self.tracking = self.node_get_member(node, str, 'track', None)
 
+        self.ref_format = self.node_get_member(node, str, 'ref-format', 'sha1')
+        if self.ref_format not in ['sha1', 'git-describe']:
+            provenance = self.node_provenance(node, member_name='ref-format')
+            raise SourceError("{}: Unexpected value for ref-format: {}".format(provenance, self.ref_format))
+
         # At this point we now know if the source has a ref and/or a track.
         # If it is missing both then we will be unable to track or build.
         if self.mirror.ref is None and self.tracking is None:

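[Editor's note] With ref-format set to git-describe, the tracked ref takes the `git describe --tags --abbrev=40 --long` shape, i.e. <tag>-<commits-since-tag>-g<40-char-sha>, which is what the new test_track_fetch test below asserts. A throwaway sketch of recovering the raw sha from such a ref (the example ref is made up):

# Split a git-describe style ref back into its parts; illustrative only.
ref = 'tag-1-g' + '0' * 40          # e.g. produced by describe --tags --abbrev=40 --long
sha = ref.rsplit('-g', 1)[-1]
assert len(sha) == 40
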
buildstream/sandbox/__init__.py:

@@ -18,6 +18,5 @@
 #        Tristan Maat <tristan maat codethink co uk>
 
 from .sandbox import Sandbox, SandboxFlags
-from ._sandboxchroot import SandboxChroot
-from ._sandboxbwrap import SandboxBwrap
 from ._sandboxremote import SandboxRemote
+from ._sandboxdummy import SandboxDummy

buildstream/sandbox/_sandboxdummy.py (new file):

+#
+# Copyright (C) 2017 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+
+from .._exceptions import SandboxError
+from . import Sandbox
+
+
+class SandboxDummy(Sandbox):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def run(self, command, flags, *, cwd=None, env=None):
+
+        # Fallback to the sandbox default settings for
+        # the cwd and env.
+        #
+        cwd = self._get_work_directory(cwd=cwd)
+        env = self._get_environment(cwd=cwd, env=env)
+
+        if not self._has_command(command[0], env):
+            raise SandboxError("Staged artifacts do not provide command "
+                               "'{}'".format(command[0]),
+                               reason='missing-command')
+
+        raise SandboxError("This platform does not support local builds")

buildstream/utils.py:

@@ -35,6 +35,7 @@ import tempfile
 import itertools
 import functools
 from contextlib import contextmanager
+from stat import S_ISDIR
 
 import psutil
 
@@ -328,27 +329,25 @@ def safe_remove(path):
     Raises:
        UtilError: In the case of unexpected system call failures
     """
-    if os.path.lexists(path):
-
-        # Try to remove anything that is in the way, but issue
-        # a warning instead if it removes a non empty directory
-        try:
+    try:
+        if S_ISDIR(os.lstat(path).st_mode):
+            os.rmdir(path)
+        else:
             os.unlink(path)
-        except OSError as e:
-            if e.errno != errno.EISDIR:
-                raise UtilError("Failed to remove '{}': {}"
-                                .format(path, e))
-
-        try:
-            os.rmdir(path)
-        except OSError as e:
-            if e.errno == errno.ENOTEMPTY:
-                return False
-            else:
-                raise UtilError("Failed to remove '{}': {}"
-                                .format(path, e))
 
-    return True
+        # File removed/unlinked successfully
+        return True
+
+    except OSError as e:
+        if e.errno == errno.ENOTEMPTY:
+            # Path is non-empty directory
+            return False
+        elif e.errno == errno.ENOENT:
+            # Path does not exist
+            return True
+
+        raise UtilError("Failed to remove '{}': {}"
+                        .format(path, e))
 
 
 def copy_files(src, dest, *, files=None, ignore_missing=False, report_written=False):

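[Editor's note] With this rewrite the helper reports success for an already-missing path and returns False only for a non-empty directory. A usage sketch, assuming a BuildStream checkout containing this change is importable as buildstream.utils:

import os
import tempfile
from buildstream.utils import safe_remove

workdir = tempfile.mkdtemp()
victim = os.path.join(workdir, 'subdir')
os.mkdir(victim)
open(os.path.join(victim, 'file'), 'w').close()

assert safe_remove(victim) is False       # non-empty directory: left alone
os.unlink(os.path.join(victim, 'file'))
assert safe_remove(victim) is True        # now empty: removed
assert safe_remove(victim) is True        # already gone: treated as success
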
tests/format/project.py:

@@ -188,3 +188,15 @@ def test_project_refs_options(cli, datafiles):
 
     # Assert that the cache keys are different
     assert result1.output != result2.output
+
+
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'element-path'))
+def test_element_path_project_path_contains_symlinks(cli, datafiles, tmpdir):
+    real_project = str(datafiles)
+    linked_project = os.path.join(str(tmpdir), 'linked')
+    os.symlink(real_project, linked_project)
+    os.makedirs(os.path.join(real_project, 'elements'), exist_ok=True)
+    with open(os.path.join(real_project, 'elements', 'element.bst'), 'w') as f:
+        f.write("kind: manual\n")
+    result = cli.run(project=linked_project, args=['show', 'element.bst'])
+    result.assert_success()

tests/integration/cachedfail.py:

@@ -121,7 +121,7 @@ def test_build_depend_on_cached_fail(cli, tmpdir, datafiles):
 
 @pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
 @pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.parametrize("on_error", ("continue",))
+@pytest.mark.parametrize("on_error", ("continue", "quit"))
 def test_push_cached_fail(cli, tmpdir, datafiles, on_error):
     project = os.path.join(datafiles.dirname, datafiles.basename)
     element_path = os.path.join(project, 'elements', 'element.bst')

tests/sources/git.py:

@@ -476,3 +476,50 @@ def test_ref_not_in_track_warn_error(cli, tmpdir, datafiles):
     result = cli.run(project=project, args=['build', 'target.bst'])
     result.assert_main_error(ErrorDomain.STREAM, None)
     result.assert_task_error(ErrorDomain.PLUGIN, CoreWarnings.REF_NOT_IN_TRACK)
+
+
+@pytest.mark.skipif(HAVE_GIT is False, reason="git is not available")
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'template'))
+@pytest.mark.parametrize("ref_format", ['sha1', 'git-describe'])
+@pytest.mark.parametrize("tag,extra_commit", [(False, False), (True, False), (True, True)])
+def test_track_fetch(cli, tmpdir, datafiles, ref_format, tag, extra_commit):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+
+    # Create the repo from 'repofiles' subdir
+    repo = create_repo('git', str(tmpdir))
+    ref = repo.create(os.path.join(project, 'repofiles'))
+    if tag:
+        repo.add_tag('tag')
+    if extra_commit:
+        repo.add_commit()
+
+    # Write out our test target
+    element = {
+        'kind': 'import',
+        'sources': [
+            repo.source_config()
+        ]
+    }
+    element['sources'][0]['ref-format'] = ref_format
+    element_path = os.path.join(project, 'target.bst')
+    _yaml.dump(element, element_path)
+
+    # Track it
+    result = cli.run(project=project, args=['track', 'target.bst'])
+    result.assert_success()
+
+    element = _yaml.load(element_path)
+    new_ref = element['sources'][0]['ref']
+
+    if ref_format == 'git-describe' and tag:
+        # Check and strip prefix
+        prefix = 'tag-{}-g'.format(0 if not extra_commit else 1)
+        assert new_ref.startswith(prefix)
+        new_ref = new_ref[len(prefix):]
+
+    # 40 chars for SHA-1
+    assert len(new_ref) == 40
+
+    # Fetch it
+    result = cli.run(project=project, args=['fetch', 'target.bst'])
+    result.assert_success()

tests/testutils/repo/git.py:

@@ -42,6 +42,9 @@ class Git(Repo):
         self._run_git('commit', '-m', 'Initial commit')
         return self.latest_commit()
 
+    def add_tag(self, tag):
+        self._run_git('tag', tag)
+
     def add_commit(self):
         self._run_git('commit', '--allow-empty', '-m', 'Additional commit')
         return self.latest_commit()