Tom Pollard pushed to branch tpollard/829 at BuildStream / buildstream
Commits:
-
3e36e363
by Tristan Van Berkom at 2019-01-24T16:55:24Z
-
ce01f87e
by Tristan Van Berkom at 2019-01-24T16:55:24Z
-
2479e8df
by Tristan Van Berkom at 2019-01-24T16:55:24Z
-
fdb8ff65
by Tristan Van Berkom at 2019-01-24T16:55:24Z
-
acd0bf22
by Tristan Van Berkom at 2019-01-24T18:01:41Z
-
41f03296
by Tristan Van Berkom at 2019-01-24T18:02:08Z
-
24ca2f46
by Tristan Van Berkom at 2019-01-24T18:02:08Z
-
9fd9fbb9
by Tristan Van Berkom at 2019-01-24T18:02:08Z
-
7ee0c579
by Tristan Van Berkom at 2019-01-24T18:02:08Z
-
353293b6
by Tristan Van Berkom at 2019-01-24T18:02:08Z
-
51ed36de
by Tristan Van Berkom at 2019-01-24T18:02:08Z
-
5797238b
by Tristan Van Berkom at 2019-01-24T18:02:08Z
-
8074ebf4
by Tristan Van Berkom at 2019-01-24T18:02:08Z
-
49c11bc8
by Tristan Van Berkom at 2019-01-24T18:02:08Z
-
3616e939
by Tristan Van Berkom at 2019-01-24T18:02:08Z
-
bcd19266
by Tristan Van Berkom at 2019-01-24T18:02:08Z
-
9c33107f
by Tristan Van Berkom at 2019-01-24T18:02:08Z
-
a2140d74
by Tristan Van Berkom at 2019-01-24T19:44:52Z
-
0ec6a5cf
by Tom Pollard at 2019-01-25T10:35:17Z
-
3a5914c9
by Tom Pollard at 2019-01-25T10:35:17Z
16 changed files:
- buildstream/_artifactcache.py
- buildstream/_context.py
- buildstream/_frontend/app.py
- buildstream/_frontend/cli.py
- buildstream/_frontend/status.py
- buildstream/_frontend/widget.py
- buildstream/_scheduler/jobs/cleanupjob.py
- buildstream/_scheduler/jobs/job.py
- buildstream/_scheduler/resources.py
- buildstream/_scheduler/scheduler.py
- buildstream/_stream.py
- buildstream/utils.py
- tests/artifactcache/cache_size.py
- tests/artifactcache/expiry.py
- tests/frontend/logging.py
- tests/integration/build-tree.py
Changes:
| ... | ... | @@ -46,6 +46,39 @@ class ArtifactCacheSpec(CASRemoteSpec): |
| 46 | 46 |
pass
|
| 47 | 47 |
|
| 48 | 48 |
|
| 49 |
+# ArtifactCacheUsage
|
|
| 50 |
+#
|
|
| 51 |
+# A simple object to report the current artifact cache
|
|
| 52 |
+# usage details.
|
|
| 53 |
+#
|
|
| 54 |
+# Note that this uses the user configured cache quota
|
|
| 55 |
+# rather than the internal quota with protective headroom
|
|
| 56 |
+# removed, to provide a more sensible value to display to
|
|
| 57 |
+# the user.
|
|
| 58 |
+#
|
|
| 59 |
+# Args:
|
|
| 60 |
+# artifacts (ArtifactCache): The artifact cache to get the status of
|
|
| 61 |
+#
|
|
| 62 |
+class ArtifactCacheUsage():
|
|
| 63 |
+ |
|
| 64 |
+ def __init__(self, artifacts):
|
|
| 65 |
+ context = artifacts.context
|
|
| 66 |
+ self.quota_config = context.config_cache_quota # Configured quota
|
|
| 67 |
+ self.quota_size = artifacts._cache_quota_original # Resolved cache quota in bytes
|
|
| 68 |
+ self.used_size = artifacts.get_cache_size() # Size used by artifacts in bytes
|
|
| 69 |
+ self.used_percent = 0 # Percentage of the quota used
|
|
| 70 |
+ if self.quota_size is not None:
|
|
| 71 |
+ self.used_percent = int(self.used_size * 100 / self.quota_size)
|
|
| 72 |
+ |
|
| 73 |
+ # Formattable into a human readable string
|
|
| 74 |
+ #
|
|
| 75 |
+ def __str__(self):
|
|
| 76 |
+ return "{} / {} ({}%)" \
|
|
| 77 |
+ .format(utils._pretty_size(self.used_size, dec_places=1),
|
|
| 78 |
+ self.quota_config,
|
|
| 79 |
+ self.used_percent)
|
|
| 80 |
+ |
|
| 81 |
+ |
|
| 49 | 82 |
# An ArtifactCache manages artifacts.
|
| 50 | 83 |
#
|
| 51 | 84 |
# Args:
|
| ... | ... | @@ -64,6 +97,7 @@ class ArtifactCache(): |
| 64 | 97 |
self._required_elements = set() # The elements required for this session
|
| 65 | 98 |
self._cache_size = None # The current cache size, sometimes it's an estimate
|
| 66 | 99 |
self._cache_quota = None # The cache quota
|
| 100 |
+ self._cache_quota_original = None # The cache quota as specified by the user, in bytes
|
|
| 67 | 101 |
self._cache_lower_threshold = None # The target cache size for a cleanup
|
| 68 | 102 |
self._remotes_setup = False # Check to prevent double-setup of remotes
|
| 69 | 103 |
|
| ... | ... | @@ -216,11 +250,33 @@ class ArtifactCache(): |
| 216 | 250 |
#
|
| 217 | 251 |
# Clean the artifact cache as much as possible.
|
| 218 | 252 |
#
|
| 253 |
+ # Args:
|
|
| 254 |
+ # progress (callable): A callback to call when a ref is removed
|
|
| 255 |
+ #
|
|
| 219 | 256 |
# Returns:
|
| 220 | 257 |
# (int): The size of the cache after having cleaned up
|
| 221 | 258 |
#
|
| 222 |
- def clean(self):
|
|
| 259 |
+ def clean(self, progress=None):
|
|
| 223 | 260 |
artifacts = self.list_artifacts()
|
| 261 |
+ context = self.context
|
|
| 262 |
+ |
|
| 263 |
+ # Some accumulative statistics
|
|
| 264 |
+ removed_ref_count = 0
|
|
| 265 |
+ space_saved = 0
|
|
| 266 |
+ |
|
| 267 |
+ # Start off with an announcement with as much info as possible
|
|
| 268 |
+ volume_size, volume_avail = self._get_cache_volume_size()
|
|
| 269 |
+ self._message(MessageType.STATUS, "Starting cache cleanup",
|
|
| 270 |
+ detail=("Elements required by the current build plan: {}\n" +
|
|
| 271 |
+ "User specified quota: {} ({})\n" +
|
|
| 272 |
+ "Cache usage: {}\n" +
|
|
| 273 |
+ "Cache volume: {} total, {} available")
|
|
| 274 |
+ .format(len(self._required_elements),
|
|
| 275 |
+ context.config_cache_quota,
|
|
| 276 |
+ utils._pretty_size(self._cache_quota_original, dec_places=2),
|
|
| 277 |
+ utils._pretty_size(self.get_cache_size(), dec_places=2),
|
|
| 278 |
+ utils._pretty_size(volume_size, dec_places=2),
|
|
| 279 |
+ utils._pretty_size(volume_avail, dec_places=2)))
|
|
| 224 | 280 |
|
| 225 | 281 |
# Build a set of the cache keys which are required
|
| 226 | 282 |
# based on the required elements at cleanup time
|
| ... | ... | @@ -245,11 +301,18 @@ class ArtifactCache(): |
| 245 | 301 |
# can't remove them, we have to abort the build.
|
| 246 | 302 |
#
|
| 247 | 303 |
# FIXME: Asking the user what to do may be neater
|
| 304 |
+ #
|
|
| 248 | 305 |
default_conf = os.path.join(os.environ['XDG_CONFIG_HOME'],
|
| 249 | 306 |
'buildstream.conf')
|
| 250 |
- detail = ("There is not enough space to complete the build.\n"
|
|
| 251 |
- "Please increase the cache-quota in {}."
|
|
| 252 |
- .format(self.context.config_origin or default_conf))
|
|
| 307 |
+ detail = ("Aborted after removing {} refs and saving {} disk space.\n"
|
|
| 308 |
+ "The remaining {} in the cache is required by the {} elements in your build plan\n\n"
|
|
| 309 |
+ "There is not enough space to complete the build.\n"
|
|
| 310 |
+ "Please increase the cache-quota in {} and/or make more disk space."
|
|
| 311 |
+ .format(removed_ref_count,
|
|
| 312 |
+ utils._pretty_size(space_saved, dec_places=2),
|
|
| 313 |
+ utils._pretty_size(self.get_cache_size(), dec_places=2),
|
|
| 314 |
+ len(self._required_elements),
|
|
| 315 |
+ (context.config_origin or default_conf)))
|
|
| 253 | 316 |
|
| 254 | 317 |
if self.has_quota_exceeded():
|
| 255 | 318 |
raise ArtifactError("Cache too full. Aborting.",
|
| ... | ... | @@ -264,10 +327,33 @@ class ArtifactCache(): |
| 264 | 327 |
# Remove the actual artifact, if it's not required.
|
| 265 | 328 |
size = self.remove(to_remove)
|
| 266 | 329 |
|
| 330 |
+ removed_ref_count += 1
|
|
| 331 |
+ space_saved += size
|
|
| 332 |
+ |
|
| 333 |
+ self._message(MessageType.STATUS,
|
|
| 334 |
+ "Freed {: <7} {}".format(
|
|
| 335 |
+ utils._pretty_size(size, dec_places=2),
|
|
| 336 |
+ to_remove))
|
|
| 337 |
+ |
|
| 267 | 338 |
# Remove the size from the removed size
|
| 268 | 339 |
self.set_cache_size(self._cache_size - size)
|
| 269 | 340 |
|
| 270 |
- # This should be O(1) if implemented correctly
|
|
| 341 |
+ # User callback
|
|
| 342 |
+ #
|
|
| 343 |
+ # Currently this process is fairly slow, but we should
|
|
| 344 |
+ # think about throttling this progress() callback if this
|
|
| 345 |
+ # becomes too intense.
|
|
| 346 |
+ if progress:
|
|
| 347 |
+ progress()
|
|
| 348 |
+ |
|
| 349 |
+ # Informational message about the side effects of the cleanup
|
|
| 350 |
+ self._message(MessageType.INFO, "Cleanup completed",
|
|
| 351 |
+ detail=("Removed {} refs and saving {} disk space.\n" +
|
|
| 352 |
+ "Cache usage is now: {}")
|
|
| 353 |
+ .format(removed_ref_count,
|
|
| 354 |
+ utils._pretty_size(space_saved, dec_places=2),
|
|
| 355 |
+ utils._pretty_size(self.get_cache_size(), dec_places=2)))
|
|
| 356 |
+ |
|
| 271 | 357 |
return self.get_cache_size()
|
| 272 | 358 |
|
| 273 | 359 |
# compute_cache_size()
|
| ... | ... | @@ -279,7 +365,14 @@ class ArtifactCache(): |
| 279 | 365 |
# (int): The size of the artifact cache.
|
| 280 | 366 |
#
|
| 281 | 367 |
def compute_cache_size(self):
|
| 282 |
- self._cache_size = self.cas.calculate_cache_size()
|
|
| 368 |
+ old_cache_size = self._cache_size
|
|
| 369 |
+ new_cache_size = self.cas.calculate_cache_size()
|
|
| 370 |
+ |
|
| 371 |
+ if old_cache_size != new_cache_size:
|
|
| 372 |
+ self._cache_size = new_cache_size
|
|
| 373 |
+ |
|
| 374 |
+ usage = ArtifactCacheUsage(self)
|
|
| 375 |
+ self._message(MessageType.STATUS, "Cache usage recomputed: {}".format(usage))
|
|
| 283 | 376 |
|
| 284 | 377 |
return self._cache_size
|
| 285 | 378 |
|
| ... | ... | @@ -307,7 +400,7 @@ class ArtifactCache(): |
| 307 | 400 |
# it is greater than the actual cache size.
|
| 308 | 401 |
#
|
| 309 | 402 |
# Returns:
|
| 310 |
- # (int) An approximation of the artifact cache size.
|
|
| 403 |
+ # (int) An approximation of the artifact cache size, in bytes.
|
|
| 311 | 404 |
#
|
| 312 | 405 |
def get_cache_size(self):
|
| 313 | 406 |
|
| ... | ... | @@ -459,8 +552,7 @@ class ArtifactCache(): |
| 459 | 552 |
# `ArtifactCache.get_artifact_fullname`)
|
| 460 | 553 |
#
|
| 461 | 554 |
# Returns:
|
| 462 |
- # (int|None) The amount of space pruned from the repository in
|
|
| 463 |
- # Bytes, or None if defer_prune is True
|
|
| 555 |
+ # (int): The amount of space recovered in the cache, in bytes
|
|
| 464 | 556 |
#
|
| 465 | 557 |
def remove(self, ref):
|
| 466 | 558 |
|
| ... | ... | @@ -848,19 +940,16 @@ class ArtifactCache(): |
| 848 | 940 |
else:
|
| 849 | 941 |
headroom = 2e9
|
| 850 | 942 |
|
| 851 |
- artifactdir_volume = self.context.artifactdir
|
|
| 852 |
- while not os.path.exists(artifactdir_volume):
|
|
| 853 |
- artifactdir_volume = os.path.dirname(artifactdir_volume)
|
|
| 854 |
- |
|
| 855 | 943 |
try:
|
| 856 |
- cache_quota = utils._parse_size(self.context.config_cache_quota, artifactdir_volume)
|
|
| 944 |
+ cache_quota = utils._parse_size(self.context.config_cache_quota,
|
|
| 945 |
+ self.context.artifactdir)
|
|
| 857 | 946 |
except utils.UtilError as e:
|
| 858 | 947 |
raise LoadError(LoadErrorReason.INVALID_DATA,
|
| 859 | 948 |
"{}\nPlease specify the value in bytes or as a % of full disk space.\n"
|
| 860 | 949 |
"\nValid values are, for example: 800M 10G 1T 50%\n"
|
| 861 | 950 |
.format(str(e))) from e
|
| 862 | 951 |
|
| 863 |
- available_space, total_size = self._get_volume_space_info_for(artifactdir_volume)
|
|
| 952 |
+ total_size, available_space = self._get_cache_volume_size()
|
|
| 864 | 953 |
cache_size = self.get_cache_size()
|
| 865 | 954 |
|
| 866 | 955 |
# Ensure system has enough storage for the cache_quota
|
| ... | ... | @@ -900,22 +989,25 @@ class ArtifactCache(): |
| 900 | 989 |
# if we end up writing more than 2G, but hey, this stuff is
|
| 901 | 990 |
# already really fuzzy.
|
| 902 | 991 |
#
|
| 992 |
+ self._cache_quota_original = cache_quota
|
|
| 903 | 993 |
self._cache_quota = cache_quota - headroom
|
| 904 | 994 |
self._cache_lower_threshold = self._cache_quota / 2
|
| 905 | 995 |
|
| 906 |
- # _get_volume_space_info_for
|
|
| 907 |
- #
|
|
| 908 |
- # Get the available space and total space for the given volume
|
|
| 996 |
+ # _get_cache_volume_size()
|
|
| 909 | 997 |
#
|
| 910 |
- # Args:
|
|
| 911 |
- # volume: volume for which to get the size
|
|
| 998 |
+ # Get the available space and total space for the volume on
|
|
| 999 |
+ # which the artifact cache is located.
|
|
| 912 | 1000 |
#
|
| 913 | 1001 |
# Returns:
|
| 914 |
- # A tuple containing first the availabe number of bytes on the requested
|
|
| 915 |
- # volume, then the total number of bytes of the volume.
|
|
| 916 |
- def _get_volume_space_info_for(self, volume):
|
|
| 917 |
- stat = os.statvfs(volume)
|
|
| 918 |
- return stat.f_bsize * stat.f_bavail, stat.f_bsize * stat.f_blocks
|
|
| 1002 |
+ # (int): The total number of bytes on the volume
|
|
| 1003 |
+ # (int): The number of available bytes on the volume
|
|
| 1004 |
+ #
|
|
| 1005 |
+ # NOTE: We use this stub to allow the test cases
|
|
| 1006 |
+ # to override what an artifact cache thinks
|
|
| 1007 |
+ # about its disk size and available bytes.
|
|
| 1008 |
+ #
|
|
| 1009 |
+ def _get_cache_volume_size(self):
|
|
| 1010 |
+ return utils._get_volume_size(self.context.artifactdir)
|
|
| 919 | 1011 |
|
| 920 | 1012 |
|
| 921 | 1013 |
# _configured_remote_artifact_cache_specs():
|
| ... | ... | @@ -30,7 +30,7 @@ from . import _yaml |
| 30 | 30 |
from ._exceptions import LoadError, LoadErrorReason, BstError
|
| 31 | 31 |
from ._message import Message, MessageType
|
| 32 | 32 |
from ._profile import Topics, profile_start, profile_end
|
| 33 |
-from ._artifactcache import ArtifactCache
|
|
| 33 |
+from ._artifactcache import ArtifactCache, ArtifactCacheUsage
|
|
| 34 | 34 |
from ._cas import CASCache
|
| 35 | 35 |
from ._workspaces import Workspaces, WorkspaceProjectCache
|
| 36 | 36 |
from .plugin import _plugin_lookup
|
| ... | ... | @@ -289,6 +289,16 @@ class Context(): |
| 289 | 289 |
|
| 290 | 290 |
return self._artifactcache
|
| 291 | 291 |
|
| 292 |
+ # get_artifact_cache_usage()
|
|
| 293 |
+ #
|
|
| 294 |
+ # Fetches the current usage of the artifact cache
|
|
| 295 |
+ #
|
|
| 296 |
+ # Returns:
|
|
| 297 |
+ # (ArtifactCacheUsage): The current status
|
|
| 298 |
+ #
|
|
| 299 |
+ def get_artifact_cache_usage(self):
|
|
| 300 |
+ return ArtifactCacheUsage(self.artifactcache)
|
|
| 301 |
+ |
|
| 292 | 302 |
# add_project():
|
| 293 | 303 |
#
|
| 294 | 304 |
# Add a project to the context.
|
| ... | ... | @@ -194,11 +194,6 @@ class App(): |
| 194 | 194 |
except BstError as e:
|
| 195 | 195 |
self._error_exit(e, "Error instantiating platform")
|
| 196 | 196 |
|
| 197 |
- try:
|
|
| 198 |
- self.context.artifactcache.preflight()
|
|
| 199 |
- except BstError as e:
|
|
| 200 |
- self._error_exit(e, "Error instantiating artifact cache")
|
|
| 201 |
- |
|
| 202 | 197 |
# Create the logger right before setting the message handler
|
| 203 | 198 |
self.logger = LogLine(self.context,
|
| 204 | 199 |
self._content_profile,
|
| ... | ... | @@ -211,6 +206,13 @@ class App(): |
| 211 | 206 |
# Propagate pipeline feedback to the user
|
| 212 | 207 |
self.context.set_message_handler(self._message_handler)
|
| 213 | 208 |
|
| 209 |
+ # Preflight the artifact cache after initializing logging,
|
|
| 210 |
+ # this can cause messages to be emitted.
|
|
| 211 |
+ try:
|
|
| 212 |
+ self.context.artifactcache.preflight()
|
|
| 213 |
+ except BstError as e:
|
|
| 214 |
+ self._error_exit(e, "Error instantiating artifact cache")
|
|
| 215 |
+ |
|
| 214 | 216 |
#
|
| 215 | 217 |
# Load the Project
|
| 216 | 218 |
#
|
| ... | ... | @@ -526,7 +526,7 @@ def shell(app, element, sysroot, mount, isolate, build_, cli_buildtree, command) |
| 526 | 526 |
else:
|
| 527 | 527 |
scope = Scope.RUN
|
| 528 | 528 |
|
| 529 |
- use_buildtree = False
|
|
| 529 |
+ use_buildtree = None
|
|
| 530 | 530 |
|
| 531 | 531 |
with app.initialized():
|
| 532 | 532 |
if not element:
|
| ... | ... | @@ -534,7 +534,8 @@ def shell(app, element, sysroot, mount, isolate, build_, cli_buildtree, command) |
| 534 | 534 |
if not element:
|
| 535 | 535 |
raise AppError('Missing argument "ELEMENT".')
|
| 536 | 536 |
|
| 537 |
- dependencies = app.stream.load_selection((element,), selection=PipelineSelection.NONE)
|
|
| 537 |
+ dependencies = app.stream.load_selection((element,), selection=PipelineSelection.NONE,
|
|
| 538 |
+ use_artifact_config=True)
|
|
| 538 | 539 |
element = dependencies[0]
|
| 539 | 540 |
prompt = app.shell_prompt(element)
|
| 540 | 541 |
mounts = [
|
| ... | ... | @@ -543,20 +544,31 @@ def shell(app, element, sysroot, mount, isolate, build_, cli_buildtree, command) |
| 543 | 544 |
]
|
| 544 | 545 |
|
| 545 | 546 |
cached = element._cached_buildtree()
|
| 546 |
- if cli_buildtree == "always":
|
|
| 547 |
- if cached:
|
|
| 548 |
- use_buildtree = True
|
|
| 549 |
- else:
|
|
| 550 |
- raise AppError("No buildtree is cached but the use buildtree option was specified")
|
|
| 551 |
- elif cli_buildtree == "never":
|
|
| 552 |
- pass
|
|
| 553 |
- elif cli_buildtree == "try":
|
|
| 554 |
- use_buildtree = cached
|
|
| 547 |
+ if cli_buildtree in ("always", "try"):
|
|
| 548 |
+ use_buildtree = cli_buildtree
|
|
| 549 |
+ if not cached and use_buildtree == "always":
|
|
| 550 |
+ click.echo("WARNING: buildtree is not cached locally, will attempt to pull from available remotes",
|
|
| 551 |
+ err=True)
|
|
| 555 | 552 |
else:
|
| 556 |
- if app.interactive and cached:
|
|
| 557 |
- use_buildtree = bool(click.confirm('Do you want to use the cached buildtree?'))
|
|
| 553 |
+ # If the value has defaulted to ask and in non interactive mode, don't consider the buildtree, this
|
|
| 554 |
+ # being the default behaviour of the command
|
|
| 555 |
+ if app.interactive and cli_buildtree == "ask":
|
|
| 556 |
+ if cached and bool(click.confirm('Do you want to use the cached buildtree?')):
|
|
| 557 |
+ use_buildtree = "always"
|
|
| 558 |
+ elif not cached:
|
|
| 559 |
+ try:
|
|
| 560 |
+ choice = click.prompt("Do you want to pull & use a cached buildtree?",
|
|
| 561 |
+ type=click.Choice(['try', 'always', 'never']),
|
|
| 562 |
+ err=True, show_choices=True)
|
|
| 563 |
+ except click.Abort:
|
|
| 564 |
+ click.echo('Aborting', err=True)
|
|
| 565 |
+ sys.exit(-1)
|
|
| 566 |
+ |
|
| 567 |
+ if choice != "never":
|
|
| 568 |
+ use_buildtree = choice
|
|
| 569 |
+ |
|
| 558 | 570 |
if use_buildtree and not element._cached_success():
|
| 559 |
- click.echo("Warning: using a buildtree from a failed build.")
|
|
| 571 |
+ click.echo("WARNING: using a buildtree from a failed build.", err=True)
|
|
| 560 | 572 |
|
| 561 | 573 |
try:
|
| 562 | 574 |
exitcode = app.stream.shell(element, scope, prompt,
|
| ... | ... | @@ -353,13 +353,17 @@ class _StatusHeader(): |
| 353 | 353 |
def render(self, line_length, elapsed):
|
| 354 | 354 |
project = self._context.get_toplevel_project()
|
| 355 | 355 |
line_length = max(line_length, 80)
|
| 356 |
- size = 0
|
|
| 357 |
- text = ''
|
|
| 358 | 356 |
|
| 357 |
+ #
|
|
| 358 |
+ # Line 1: Session time, project name, session / total elements
|
|
| 359 |
+ #
|
|
| 360 |
+ # ========= 00:00:00 project-name (143/387) =========
|
|
| 361 |
+ #
|
|
| 359 | 362 |
session = str(len(self._stream.session_elements))
|
| 360 | 363 |
total = str(len(self._stream.total_elements))
|
| 361 | 364 |
|
| 362 |
- # Format and calculate size for target and overall time code
|
|
| 365 |
+ size = 0
|
|
| 366 |
+ text = ''
|
|
| 363 | 367 |
size += len(total) + len(session) + 4 # Size for (N/N) with a leading space
|
| 364 | 368 |
size += 8 # Size of time code
|
| 365 | 369 |
size += len(project.name) + 1
|
| ... | ... | @@ -372,6 +376,12 @@ class _StatusHeader(): |
| 372 | 376 |
self._format_profile.fmt(')')
|
| 373 | 377 |
|
| 374 | 378 |
line1 = self._centered(text, size, line_length, '=')
|
| 379 |
+ |
|
| 380 |
+ #
|
|
| 381 |
+ # Line 2: Dynamic list of queue status reports
|
|
| 382 |
+ #
|
|
| 383 |
+ # (Fetched:0 117 0)→ (Built:4 0 0)
|
|
| 384 |
+ #
|
|
| 375 | 385 |
size = 0
|
| 376 | 386 |
text = ''
|
| 377 | 387 |
|
| ... | ... | @@ -389,10 +399,28 @@ class _StatusHeader(): |
| 389 | 399 |
|
| 390 | 400 |
line2 = self._centered(text, size, line_length, ' ')
|
| 391 | 401 |
|
| 392 |
- size = 24
|
|
| 393 |
- text = self._format_profile.fmt("~~~~~ ") + \
|
|
| 394 |
- self._content_profile.fmt('Active Tasks') + \
|
|
| 395 |
- self._format_profile.fmt(" ~~~~~")
|
|
| 402 |
+ #
|
|
| 403 |
+ # Line 3: Cache usage percentage report
|
|
| 404 |
+ #
|
|
| 405 |
+ # ~~~~~~ cache: 69% ~~~~~~
|
|
| 406 |
+ #
|
|
| 407 |
+ usage = self._context.get_artifact_cache_usage()
|
|
| 408 |
+ usage_percent = '{}%'.format(usage.used_percent)
|
|
| 409 |
+ |
|
| 410 |
+ size = 21
|
|
| 411 |
+ size += len(usage_percent)
|
|
| 412 |
+ if usage.used_percent >= 95:
|
|
| 413 |
+ formatted_usage_percent = self._error_profile.fmt(usage_percent)
|
|
| 414 |
+ elif usage.used_percent >= 80:
|
|
| 415 |
+ formatted_usage_percent = self._content_profile.fmt(usage_percent)
|
|
| 416 |
+ else:
|
|
| 417 |
+ formatted_usage_percent = self._success_profile.fmt(usage_percent)
|
|
| 418 |
+ |
|
| 419 |
+ text = self._format_profile.fmt("~~~~~~ ") + \
|
|
| 420 |
+ self._content_profile.fmt('cache') + \
|
|
| 421 |
+ self._format_profile.fmt(': ') + \
|
|
| 422 |
+ formatted_usage_percent + \
|
|
| 423 |
+ self._format_profile.fmt(' ~~~~~~')
|
|
| 396 | 424 |
line3 = self._centered(text, size, line_length, ' ')
|
| 397 | 425 |
|
| 398 | 426 |
return line1 + '\n' + line2 + '\n' + line3
|
| ... | ... | @@ -175,29 +175,22 @@ class TypeName(Widget): |
| 175 | 175 |
# A widget for displaying the Element name
|
| 176 | 176 |
class ElementName(Widget):
|
| 177 | 177 |
|
| 178 |
- def __init__(self, context, content_profile, format_profile):
|
|
| 179 |
- super(ElementName, self).__init__(context, content_profile, format_profile)
|
|
| 180 |
- |
|
| 181 |
- # Pre initialization format string, before we know the length of
|
|
| 182 |
- # element names in the pipeline
|
|
| 183 |
- self._fmt_string = '{: <30}'
|
|
| 184 |
- |
|
| 185 | 178 |
def render(self, message):
|
| 179 |
+ action_name = message.action_name
|
|
| 186 | 180 |
element_id = message.task_id or message.unique_id
|
| 187 |
- if element_id is None:
|
|
| 188 |
- return ""
|
|
| 189 |
- |
|
| 190 |
- plugin = _plugin_lookup(element_id)
|
|
| 191 |
- name = plugin._get_full_name()
|
|
| 181 |
+ if element_id is not None:
|
|
| 182 |
+ plugin = _plugin_lookup(element_id)
|
|
| 183 |
+ name = plugin._get_full_name()
|
|
| 184 |
+ name = '{: <30}'.format(name)
|
|
| 185 |
+ else:
|
|
| 186 |
+ name = 'core activity'
|
|
| 187 |
+ name = '{: <30}'.format(name)
|
|
| 192 | 188 |
|
| 193 |
- # Sneak the action name in with the element name
|
|
| 194 |
- action_name = message.action_name
|
|
| 195 | 189 |
if not action_name:
|
| 196 | 190 |
action_name = "Main"
|
| 197 | 191 |
|
| 198 | 192 |
return self.content_profile.fmt("{: >5}".format(action_name.lower())) + \
|
| 199 |
- self.format_profile.fmt(':') + \
|
|
| 200 |
- self.content_profile.fmt(self._fmt_string.format(name))
|
|
| 193 |
+ self.format_profile.fmt(':') + self.content_profile.fmt(name)
|
|
| 201 | 194 |
|
| 202 | 195 |
|
| 203 | 196 |
# A widget for displaying the primary message text
|
| ... | ... | @@ -219,9 +212,12 @@ class CacheKey(Widget): |
| 219 | 212 |
def render(self, message):
|
| 220 | 213 |
|
| 221 | 214 |
element_id = message.task_id or message.unique_id
|
| 222 |
- if element_id is None or not self._key_length:
|
|
| 215 |
+ if not self._key_length:
|
|
| 223 | 216 |
return ""
|
| 224 | 217 |
|
| 218 |
+ if element_id is None:
|
|
| 219 |
+ return ' ' * self._key_length
|
|
| 220 |
+ |
|
| 225 | 221 |
missing = False
|
| 226 | 222 |
key = ' ' * self._key_length
|
| 227 | 223 |
plugin = _plugin_lookup(element_id)
|
| ... | ... | @@ -456,6 +452,7 @@ class LogLine(Widget): |
| 456 | 452 |
values["Session Start"] = starttime.strftime('%A, %d-%m-%Y at %H:%M:%S')
|
| 457 | 453 |
values["Project"] = "{} ({})".format(project.name, project.directory)
|
| 458 | 454 |
values["Targets"] = ", ".join([t.name for t in stream.targets])
|
| 455 |
+ values["Cache Usage"] = "{}".format(context.get_artifact_cache_usage())
|
|
| 459 | 456 |
text += self._format_values(values)
|
| 460 | 457 |
|
| 461 | 458 |
# User configurations
|
| ... | ... | @@ -28,7 +28,20 @@ class CleanupJob(Job): |
| 28 | 28 |
self._artifacts = context.artifactcache
|
| 29 | 29 |
|
| 30 | 30 |
def child_process(self):
|
| 31 |
- return self._artifacts.clean()
|
|
| 31 |
+ def progress():
|
|
| 32 |
+ self.send_message('update-cache-size',
|
|
| 33 |
+ self._artifacts.get_cache_size())
|
|
| 34 |
+ return self._artifacts.clean(progress)
|
|
| 35 |
+ |
|
| 36 |
+ def handle_message(self, message_type, message):
|
|
| 37 |
+ |
|
| 38 |
+ # Update the cache size in the main process as we go,
|
|
| 39 |
+ # this provides better feedback in the UI.
|
|
| 40 |
+ if message_type == 'update-cache-size':
|
|
| 41 |
+ self._artifacts.set_cache_size(message)
|
|
| 42 |
+ return True
|
|
| 43 |
+ |
|
| 44 |
+ return False
|
|
| 32 | 45 |
|
| 33 | 46 |
def parent_complete(self, status, result):
|
| 34 | 47 |
if status == JobStatus.OK:
|
| ... | ... | @@ -58,10 +58,10 @@ class JobStatus(): |
| 58 | 58 |
|
| 59 | 59 |
|
| 60 | 60 |
# Used to distinguish between status messages and return values
|
| 61 |
-class Envelope():
|
|
| 61 |
+class _Envelope():
|
|
| 62 | 62 |
def __init__(self, message_type, message):
|
| 63 |
- self._message_type = message_type
|
|
| 64 |
- self._message = message
|
|
| 63 |
+ self.message_type = message_type
|
|
| 64 |
+ self.message = message
|
|
| 65 | 65 |
|
| 66 | 66 |
|
| 67 | 67 |
# Process class that doesn't call waitpid on its own.
|
| ... | ... | @@ -275,10 +275,37 @@ class Job(): |
| 275 | 275 |
def set_task_id(self, task_id):
|
| 276 | 276 |
self._task_id = task_id
|
| 277 | 277 |
|
| 278 |
+ # send_message()
|
|
| 279 |
+ #
|
|
| 280 |
+ # To be called from inside Job.child_process() implementations
|
|
| 281 |
+ # to send messages to the main process during processing.
|
|
| 282 |
+ #
|
|
| 283 |
+ # These messages will be processed by the class's Job.handle_message()
|
|
| 284 |
+ # implementation.
|
|
| 285 |
+ #
|
|
| 286 |
+ def send_message(self, message_type, message):
|
|
| 287 |
+ self._queue.put(_Envelope(message_type, message))
|
|
| 288 |
+ |
|
| 278 | 289 |
#######################################################
|
| 279 | 290 |
# Abstract Methods #
|
| 280 | 291 |
#######################################################
|
| 281 | 292 |
|
| 293 |
+ # handle_message()
|
|
| 294 |
+ #
|
|
| 295 |
+ # Handle a custom message. This will be called in the main process in
|
|
| 296 |
+ # response to any messages sent to the main process using the
|
|
| 297 |
+ # Job.send_message() API from inside a Job.child_process() implementation
|
|
| 298 |
+ #
|
|
| 299 |
+ # Args:
|
|
| 300 |
+ # message_type (str): A string to identify the message type
|
|
| 301 |
+ # message (any): A simple serializable object
|
|
| 302 |
+ #
|
|
| 303 |
+ # Returns:
|
|
| 304 |
+ # (bool): Should return a truthy value if message_type is handled.
|
|
| 305 |
+ #
|
|
| 306 |
+ def handle_message(self, message_type, message):
|
|
| 307 |
+ return False
|
|
| 308 |
+ |
|
| 282 | 309 |
# parent_complete()
|
| 283 | 310 |
#
|
| 284 | 311 |
# This will be executed after the job finishes, and is expected to
|
| ... | ... | @@ -416,7 +443,7 @@ class Job(): |
| 416 | 443 |
elapsed=elapsed, detail=e.detail,
|
| 417 | 444 |
logfile=filename, sandbox=e.sandbox)
|
| 418 | 445 |
|
| 419 |
- self._queue.put(Envelope('child_data', self.child_process_data()))
|
|
| 446 |
+ self._queue.put(_Envelope('child_data', self.child_process_data()))
|
|
| 420 | 447 |
|
| 421 | 448 |
# Report the exception to the parent (for internal testing purposes)
|
| 422 | 449 |
self._child_send_error(e)
|
| ... | ... | @@ -442,7 +469,7 @@ class Job(): |
| 442 | 469 |
|
| 443 | 470 |
else:
|
| 444 | 471 |
# No exception occurred in the action
|
| 445 |
- self._queue.put(Envelope('child_data', self.child_process_data()))
|
|
| 472 |
+ self._queue.put(_Envelope('child_data', self.child_process_data()))
|
|
| 446 | 473 |
self._child_send_result(result)
|
| 447 | 474 |
|
| 448 | 475 |
elapsed = datetime.datetime.now() - starttime
|
| ... | ... | @@ -469,7 +496,7 @@ class Job(): |
| 469 | 496 |
domain = e.domain
|
| 470 | 497 |
reason = e.reason
|
| 471 | 498 |
|
| 472 |
- envelope = Envelope('error', {
|
|
| 499 |
+ envelope = _Envelope('error', {
|
|
| 473 | 500 |
'domain': domain,
|
| 474 | 501 |
'reason': reason
|
| 475 | 502 |
})
|
| ... | ... | @@ -487,7 +514,7 @@ class Job(): |
| 487 | 514 |
#
|
| 488 | 515 |
def _child_send_result(self, result):
|
| 489 | 516 |
if result is not None:
|
| 490 |
- envelope = Envelope('result', result)
|
|
| 517 |
+ envelope = _Envelope('result', result)
|
|
| 491 | 518 |
self._queue.put(envelope)
|
| 492 | 519 |
|
| 493 | 520 |
# _child_shutdown()
|
| ... | ... | @@ -524,7 +551,7 @@ class Job(): |
| 524 | 551 |
if message.message_type == MessageType.LOG:
|
| 525 | 552 |
return
|
| 526 | 553 |
|
| 527 |
- self._queue.put(Envelope('message', message))
|
|
| 554 |
+ self._queue.put(_Envelope('message', message))
|
|
| 528 | 555 |
|
| 529 | 556 |
# _parent_shutdown()
|
| 530 | 557 |
#
|
| ... | ... | @@ -588,24 +615,28 @@ class Job(): |
| 588 | 615 |
if not self._listening:
|
| 589 | 616 |
return
|
| 590 | 617 |
|
| 591 |
- if envelope._message_type == 'message':
|
|
| 618 |
+ if envelope.message_type == 'message':
|
|
| 592 | 619 |
# Propagate received messages from children
|
| 593 | 620 |
# back through the context.
|
| 594 |
- self._scheduler.context.message(envelope._message)
|
|
| 595 |
- elif envelope._message_type == 'error':
|
|
| 621 |
+ self._scheduler.context.message(envelope.message)
|
|
| 622 |
+ elif envelope.message_type == 'error':
|
|
| 596 | 623 |
# For regression tests only, save the last error domain / reason
|
| 597 | 624 |
# reported from a child task in the main process, this global state
|
| 598 | 625 |
# is currently managed in _exceptions.py
|
| 599 |
- set_last_task_error(envelope._message['domain'],
|
|
| 600 |
- envelope._message['reason'])
|
|
| 601 |
- elif envelope._message_type == 'result':
|
|
| 626 |
+ set_last_task_error(envelope.message['domain'],
|
|
| 627 |
+ envelope.message['reason'])
|
|
| 628 |
+ elif envelope.message_type == 'result':
|
|
| 602 | 629 |
assert self._result is None
|
| 603 |
- self._result = envelope._message
|
|
| 604 |
- elif envelope._message_type == 'child_data':
|
|
| 630 |
+ self._result = envelope.message
|
|
| 631 |
+ elif envelope.message_type == 'child_data':
|
|
| 605 | 632 |
# If we retry a job, we assign a new value to this
|
| 606 |
- self.child_data = envelope._message
|
|
| 607 |
- else:
|
|
| 608 |
- raise Exception()
|
|
| 633 |
+ self.child_data = envelope.message
|
|
| 634 |
+ |
|
| 635 |
+ # Try Job subclass specific messages now
|
|
| 636 |
+ elif not self.handle_message(envelope.message_type,
|
|
| 637 |
+ envelope.message):
|
|
| 638 |
+ assert 0, "Unhandled message type '{}': {}" \
|
|
| 639 |
+ .format(envelope.message_type, envelope.message)
|
|
| 609 | 640 |
|
| 610 | 641 |
# _parent_process_queue()
|
| 611 | 642 |
#
|
| ... | ... | @@ -163,4 +163,4 @@ class Resources(): |
| 163 | 163 |
def unregister_exclusive_interest(self, resources, source):
|
| 164 | 164 |
|
| 165 | 165 |
for resource in resources:
|
| 166 |
- self._exclusive_resources[resource].remove(source)
|
|
| 166 |
+ self._exclusive_resources[resource].discard(source)
|
| ... | ... | @@ -40,8 +40,8 @@ class SchedStatus(): |
| 40 | 40 |
|
| 41 | 41 |
# Some action names for the internal jobs we launch
|
| 42 | 42 |
#
|
| 43 |
-_ACTION_NAME_CLEANUP = 'cleanup'
|
|
| 44 |
-_ACTION_NAME_CACHE_SIZE = 'cache_size'
|
|
| 43 |
+_ACTION_NAME_CLEANUP = 'clean'
|
|
| 44 |
+_ACTION_NAME_CACHE_SIZE = 'size'
|
|
| 45 | 45 |
|
| 46 | 46 |
|
| 47 | 47 |
# Scheduler()
|
| ... | ... | @@ -151,6 +151,9 @@ class Scheduler(): |
| 151 | 151 |
# Handle unix signals while running
|
| 152 | 152 |
self._connect_signals()
|
| 153 | 153 |
|
| 154 |
+ # Check if we need to start with some cache maintenance
|
|
| 155 |
+ self._check_cache_management()
|
|
| 156 |
+ |
|
| 154 | 157 |
# Run the queues
|
| 155 | 158 |
self._sched()
|
| 156 | 159 |
self.loop.run_forever()
|
| ... | ... | @@ -272,6 +275,31 @@ class Scheduler(): |
| 272 | 275 |
# Local Private Methods #
|
| 273 | 276 |
#######################################################
|
| 274 | 277 |
|
| 278 |
+ # _check_cache_management()
|
|
| 279 |
+ #
|
|
| 280 |
+ # Run an initial check if we need to lock the cache
|
|
| 281 |
+ # resource and check the size and possibly launch
|
|
| 282 |
+ # a cleanup.
|
|
| 283 |
+ #
|
|
| 284 |
+ # Sessions which do not add to the cache are not affected.
|
|
| 285 |
+ #
|
|
| 286 |
+ def _check_cache_management(self):
|
|
| 287 |
+ |
|
| 288 |
+ # Only trigger the check for a scheduler run which has
|
|
| 289 |
+ # queues which require the CACHE resource.
|
|
| 290 |
+ if not any(q for q in self.queues
|
|
| 291 |
+ if ResourceType.CACHE in q.resources):
|
|
| 292 |
+ return
|
|
| 293 |
+ |
|
| 294 |
+ # If the estimated size outgrows the quota, queue a job to
|
|
| 295 |
+ # actually check the real cache size initially, this one
|
|
| 296 |
+ # should have exclusive access to the cache to ensure nothing
|
|
| 297 |
+ # starts while we are checking the cache.
|
|
| 298 |
+ #
|
|
| 299 |
+ artifacts = self.context.artifactcache
|
|
| 300 |
+ if artifacts.has_quota_exceeded():
|
|
| 301 |
+ self._sched_cache_size_job(exclusive=True)
|
|
| 302 |
+ |
|
| 275 | 303 |
# _spawn_job()
|
| 276 | 304 |
#
|
| 277 | 305 |
# Spawns a job
|
| ... | ... | @@ -292,6 +320,11 @@ class Scheduler(): |
| 292 | 320 |
self._cache_size_running = None
|
| 293 | 321 |
self.resources.release([ResourceType.CACHE, ResourceType.PROCESS])
|
| 294 | 322 |
|
| 323 |
+ # Unregister the exclusive interest if there was any
|
|
| 324 |
+ self.resources.unregister_exclusive_interest(
|
|
| 325 |
+ [ResourceType.CACHE], 'cache-size'
|
|
| 326 |
+ )
|
|
| 327 |
+ |
|
| 295 | 328 |
# Schedule a cleanup job if we've hit the threshold
|
| 296 | 329 |
if status != JobStatus.OK:
|
| 297 | 330 |
return
|
| ... | ... | @@ -344,11 +377,35 @@ class Scheduler(): |
| 344 | 377 |
# Runs a cache size job if one is scheduled to run now and
|
| 345 | 378 |
# sufficient resources are available.
|
| 346 | 379 |
#
|
| 347 |
- def _sched_cache_size_job(self):
|
|
| 380 |
+ # Args:
|
|
| 381 |
+ # exclusive (bool): Run a cache size job immediately and
|
|
| 382 |
+ # hold the ResourceType.CACHE resource
|
|
| 383 |
+ # exclusively (used at startup).
|
|
| 384 |
+ #
|
|
| 385 |
+ def _sched_cache_size_job(self, *, exclusive=False):
|
|
| 386 |
+ |
|
| 387 |
+ # The exclusive argument is not intended (or safe) for arbitrary use.
|
|
| 388 |
+ if exclusive:
|
|
| 389 |
+ assert not self._cache_size_scheduled
|
|
| 390 |
+ assert not self._cache_size_running
|
|
| 391 |
+ assert not self._active_jobs
|
|
| 392 |
+ self._cache_size_scheduled = True
|
|
| 348 | 393 |
|
| 349 | 394 |
if self._cache_size_scheduled and not self._cache_size_running:
|
| 350 | 395 |
|
| 351 |
- if self.resources.reserve([ResourceType.CACHE, ResourceType.PROCESS]):
|
|
| 396 |
+ # Handle the exclusive launch
|
|
| 397 |
+ exclusive_resources = set()
|
|
| 398 |
+ if exclusive:
|
|
| 399 |
+ exclusive_resources.add(ResourceType.CACHE)
|
|
| 400 |
+ self.resources.register_exclusive_interest(
|
|
| 401 |
+ exclusive_resources, 'cache-size'
|
|
| 402 |
+ )
|
|
| 403 |
+ |
|
| 404 |
+ # Reserve the resources (with the possible exclusive cache resource)
|
|
| 405 |
+ if self.resources.reserve([ResourceType.CACHE, ResourceType.PROCESS],
|
|
| 406 |
+ exclusive_resources):
|
|
| 407 |
+ |
|
| 408 |
+ # Update state and launch
|
|
| 352 | 409 |
self._cache_size_scheduled = False
|
| 353 | 410 |
self._cache_size_running = \
|
| 354 | 411 |
CacheSizeJob(self, _ACTION_NAME_CACHE_SIZE,
|
| ... | ... | @@ -100,16 +100,19 @@ class Stream(): |
| 100 | 100 |
# targets (list of str): Targets to pull
|
| 101 | 101 |
# selection (PipelineSelection): The selection mode for the specified targets
|
| 102 | 102 |
# except_targets (list of str): Specified targets to except from fetching
|
| 103 |
+ # use_artifact_config (bool): If artifact remote config should be loaded
|
|
| 103 | 104 |
#
|
| 104 | 105 |
# Returns:
|
| 105 | 106 |
# (list of Element): The selected elements
|
| 106 | 107 |
def load_selection(self, targets, *,
|
| 107 | 108 |
selection=PipelineSelection.NONE,
|
| 108 |
- except_targets=()):
|
|
| 109 |
+ except_targets=(),
|
|
| 110 |
+ use_artifact_config=False):
|
|
| 109 | 111 |
elements, _ = self._load(targets, (),
|
| 110 | 112 |
selection=selection,
|
| 111 | 113 |
except_targets=except_targets,
|
| 112 |
- fetch_subprojects=False)
|
|
| 114 |
+ fetch_subprojects=False,
|
|
| 115 |
+ use_artifact_config=use_artifact_config)
|
|
| 113 | 116 |
return elements
|
| 114 | 117 |
|
| 115 | 118 |
# shell()
|
| ... | ... | @@ -124,7 +127,7 @@ class Stream(): |
| 124 | 127 |
# mounts (list of HostMount): Additional directories to mount into the sandbox
|
| 125 | 128 |
# isolate (bool): Whether to isolate the environment like we do in builds
|
| 126 | 129 |
# command (list): An argv to launch in the sandbox, or None
|
| 127 |
- # usebuildtree (bool): Wheather to use a buildtree as the source.
|
|
| 130 |
+ # usebuildtree (str): Whether to use a buildtree as the source, given cli option
|
|
| 128 | 131 |
#
|
| 129 | 132 |
# Returns:
|
| 130 | 133 |
# (int): The exit code of the launched shell
|
| ... | ... | @@ -134,7 +137,7 @@ class Stream(): |
| 134 | 137 |
mounts=None,
|
| 135 | 138 |
isolate=False,
|
| 136 | 139 |
command=None,
|
| 137 |
- usebuildtree=False):
|
|
| 140 |
+ usebuildtree=None):
|
|
| 138 | 141 |
|
| 139 | 142 |
# Assert we have everything we need built, unless the directory is specified
|
| 140 | 143 |
# in which case we just blindly trust the directory, using the element
|
| ... | ... | @@ -149,8 +152,31 @@ class Stream(): |
| 149 | 152 |
raise StreamError("Elements need to be built or downloaded before staging a shell environment",
|
| 150 | 153 |
detail="\n".join(missing_deps))
|
| 151 | 154 |
|
| 155 |
+ buildtree = False
|
|
| 156 |
+ # Check if we require a pull queue attempt, with given artifact state and context
|
|
| 157 |
+ if usebuildtree:
|
|
| 158 |
+ if not element._cached_buildtree():
|
|
| 159 |
+ require_buildtree = self._buildtree_pull_required([element])
|
|
| 160 |
+ # Attempt a pull queue for the given element if remote and context allow it
|
|
| 161 |
+ if require_buildtree:
|
|
| 162 |
+ self._message(MessageType.INFO, "Attempting to fetch missing artifact buildtree")
|
|
| 163 |
+ self._add_queue(PullQueue(self._scheduler))
|
|
| 164 |
+ self._enqueue_plan(require_buildtree)
|
|
| 165 |
+ self._run()
|
|
| 166 |
+ # Now check if the buildtree was successfully fetched
|
|
| 167 |
+ if element._cached_buildtree():
|
|
| 168 |
+ buildtree = True
|
|
| 169 |
+ if not buildtree:
|
|
| 170 |
+ if usebuildtree == "always":
|
|
| 171 |
+ raise StreamError("Buildtree is not cached locally or in available remotes")
|
|
| 172 |
+ else:
|
|
| 173 |
+ self._message(MessageType.INFO, """Buildtree is not cached locally or in available remotes,
|
|
| 174 |
+ shell will be loaded without it""")
|
|
| 175 |
+ else:
|
|
| 176 |
+ buildtree = True
|
|
| 177 |
+ |
|
| 152 | 178 |
return element._shell(scope, directory, mounts=mounts, isolate=isolate, prompt=prompt, command=command,
|
| 153 |
- usebuildtree=usebuildtree)
|
|
| 179 |
+ usebuildtree=buildtree)
|
|
| 154 | 180 |
|
| 155 | 181 |
# build()
|
| 156 | 182 |
#
|
| ... | ... | @@ -633,6 +633,27 @@ def _get_dir_size(path): |
| 633 | 633 |
return get_size(path)
|
| 634 | 634 |
|
| 635 | 635 |
|
| 636 |
+# _get_volume_size():
|
|
| 637 |
+#
|
|
| 638 |
+# Gets the overall usage and total size of a mounted filesystem in bytes.
|
|
| 639 |
+#
|
|
| 640 |
+# Args:
|
|
| 641 |
+# path (str): The path to check
|
|
| 642 |
+#
|
|
| 643 |
+# Returns:
|
|
| 644 |
+# (int): The total number of bytes on the volume
|
|
| 645 |
+# (int): The number of available bytes on the volume
|
|
| 646 |
+#
|
|
| 647 |
+def _get_volume_size(path):
|
|
| 648 |
+ try:
|
|
| 649 |
+ stat_ = os.statvfs(path)
|
|
| 650 |
+ except OSError as e:
|
|
| 651 |
+ raise UtilError("Failed to retrieve stats on volume for path '{}': {}"
|
|
| 652 |
+ .format(path, e)) from e
|
|
| 653 |
+ |
|
| 654 |
+ return stat_.f_bsize * stat_.f_blocks, stat_.f_bsize * stat_.f_bavail
|
|
| 655 |
+ |
|
| 656 |
+ |
|
| 636 | 657 |
# _parse_size():
|
| 637 | 658 |
#
|
| 638 | 659 |
# Convert a string representing data size to a number of
|
| ... | ... | @@ -667,8 +688,7 @@ def _parse_size(size, volume): |
| 667 | 688 |
if num > 100:
|
| 668 | 689 |
raise UtilError("{}% is not a valid percentage value.".format(num))
|
| 669 | 690 |
|
| 670 |
- stat_ = os.statvfs(volume)
|
|
| 671 |
- disk_size = stat_.f_blocks * stat_.f_bsize
|
|
| 691 |
+ disk_size, _ = _get_volume_size(volume)
|
|
| 672 | 692 |
|
| 673 | 693 |
return disk_size * (num / 100)
|
| 674 | 694 |
|
| ... | ... | @@ -80,7 +80,7 @@ def test_quota_over_1024T(cli, tmpdir): |
| 80 | 80 |
_yaml.dump({'name': 'main'}, str(project.join("project.conf")))
|
| 81 | 81 |
|
| 82 | 82 |
volume_space_patch = mock.patch(
|
| 83 |
- "buildstream._artifactcache.ArtifactCache._get_volume_space_info_for",
|
|
| 83 |
+ "buildstream._artifactcache.ArtifactCache._get_cache_volume_size",
|
|
| 84 | 84 |
autospec=True,
|
| 85 | 85 |
return_value=(1025 * TiB, 1025 * TiB)
|
| 86 | 86 |
)
|
| ... | ... | @@ -18,6 +18,7 @@ |
| 18 | 18 |
#
|
| 19 | 19 |
|
| 20 | 20 |
import os
|
| 21 |
+import re
|
|
| 21 | 22 |
from unittest import mock
|
| 22 | 23 |
|
| 23 | 24 |
import pytest
|
| ... | ... | @@ -357,9 +358,9 @@ def test_invalid_cache_quota(cli, datafiles, tmpdir, quota, err_domain, err_reas |
| 357 | 358 |
total_space = 10000
|
| 358 | 359 |
|
| 359 | 360 |
volume_space_patch = mock.patch(
|
| 360 |
- "buildstream._artifactcache.ArtifactCache._get_volume_space_info_for",
|
|
| 361 |
+ "buildstream._artifactcache.ArtifactCache._get_cache_volume_size",
|
|
| 361 | 362 |
autospec=True,
|
| 362 |
- return_value=(free_space, total_space),
|
|
| 363 |
+ return_value=(total_space, free_space),
|
|
| 363 | 364 |
)
|
| 364 | 365 |
|
| 365 | 366 |
cache_size_patch = mock.patch(
|
| ... | ... | @@ -425,3 +426,66 @@ def test_extract_expiry(cli, datafiles, tmpdir): |
| 425 | 426 |
|
| 426 | 427 |
assert os.path.isdir(refsdirtarget2)
|
| 427 | 428 |
assert not os.path.exists(refsdirtarget)
|
| 429 |
+ |
|
| 430 |
+ |
|
| 431 |
+# Ensures that when launching BuildStream with a full artifact cache,
|
|
| 432 |
+# the cache size and cleanup jobs are run before any other jobs.
|
|
| 433 |
+#
|
|
| 434 |
+@pytest.mark.datafiles(DATA_DIR)
|
|
| 435 |
+def test_cleanup_first(cli, datafiles, tmpdir):
|
|
| 436 |
+ project = os.path.join(datafiles.dirname, datafiles.basename)
|
|
| 437 |
+ element_path = 'elements'
|
|
| 438 |
+ cache_location = os.path.join(project, 'cache', 'artifacts', 'ostree')
|
|
| 439 |
+ checkout = os.path.join(project, 'checkout')
|
|
| 440 |
+ |
|
| 441 |
+ cli.configure({
|
|
| 442 |
+ 'cache': {
|
|
| 443 |
+ 'quota': 10000000,
|
|
| 444 |
+ }
|
|
| 445 |
+ })
|
|
| 446 |
+ |
|
| 447 |
+ # Create an element that uses almost the entire cache (an empty
|
|
| 448 |
+ # ostree cache starts at about ~10KiB, so we need a bit of a
|
|
| 449 |
+ # buffer)
|
|
| 450 |
+ create_element_size('target.bst', project, element_path, [], 8000000)
|
|
| 451 |
+ res = cli.run(project=project, args=['build', 'target.bst'])
|
|
| 452 |
+ res.assert_success()
|
|
| 453 |
+ |
|
| 454 |
+ assert cli.get_element_state(project, 'target.bst') == 'cached'
|
|
| 455 |
+ |
|
| 456 |
+ # Now configure with a smaller quota, create a situation
|
|
| 457 |
+ # where the cache must be cleaned up before building anything else.
|
|
| 458 |
+ #
|
|
| 459 |
+ # Fix the fetchers and builders just to ensure a predictable
|
|
| 460 |
+ # sequence of events (although it does not affect this test)
|
|
| 461 |
+ cli.configure({
|
|
| 462 |
+ 'cache': {
|
|
| 463 |
+ 'quota': 5000000,
|
|
| 464 |
+ },
|
|
| 465 |
+ 'scheduler': {
|
|
| 466 |
+ 'fetchers': 1,
|
|
| 467 |
+ 'builders': 1
|
|
| 468 |
+ }
|
|
| 469 |
+ })
|
|
| 470 |
+ |
|
| 471 |
+ # Our cache is now more than full, BuildStream
|
|
| 472 |
+ create_element_size('target2.bst', project, element_path, [], 4000000)
|
|
| 473 |
+ res = cli.run(project=project, args=['build', 'target2.bst'])
|
|
| 474 |
+ res.assert_success()
|
|
| 475 |
+ |
|
| 476 |
+ # Find all of the activity (like push, pull, fetch) lines
|
|
| 477 |
+ results = re.findall(r'\[.*\]\[.*\]\[\s*(\S+):.*\]\s*START\s*.*\.log', res.stderr)
|
|
| 478 |
+ |
|
| 479 |
+ # Don't bother checking the order of 'fetch', it is allowed to start
|
|
| 480 |
+ # before or after the initial cache size job, runs in parallel, and does
|
|
| 481 |
+ # not require ResourceType.CACHE.
|
|
| 482 |
+ results.remove('fetch')
|
|
| 483 |
+ print(results)
|
|
| 484 |
+ |
|
| 485 |
+ # Assert the expected sequence of events
|
|
| 486 |
+ assert results == ['size', 'clean', 'build']
|
|
| 487 |
+ |
|
| 488 |
+ # Check that the correct element remains in the cache
|
|
| 489 |
+ states = cli.get_element_states(project, ['target.bst', 'target2.bst'])
|
|
| 490 |
+ assert states['target.bst'] != 'cached'
|
|
| 491 |
+ assert states['target2.bst'] == 'cached'
|
| ... | ... | @@ -41,7 +41,7 @@ def test_default_logging(cli, tmpdir, datafiles): |
| 41 | 41 |
result = cli.run(project=project, args=['source', 'fetch', element_name])
|
| 42 | 42 |
result.assert_success()
|
| 43 | 43 |
|
| 44 |
- m = re.search(r"\[\d\d:\d\d:\d\d\]\[\]\[\] SUCCESS Checking sources", result.stderr)
|
|
| 44 |
+ m = re.search(r"\[\d\d:\d\d:\d\d\]\[\s*\]\[.*\] SUCCESS Checking sources", result.stderr)
|
|
| 45 | 45 |
assert(m is not None)
|
| 46 | 46 |
|
| 47 | 47 |
|
| ... | ... | @@ -77,7 +77,7 @@ def test_custom_logging(cli, tmpdir, datafiles): |
| 77 | 77 |
result = cli.run(project=project, args=['source', 'fetch', element_name])
|
| 78 | 78 |
result.assert_success()
|
| 79 | 79 |
|
| 80 |
- m = re.search(r"\d\d:\d\d:\d\d,\d\d:\d\d:\d\d.\d{6},\d\d:\d\d:\d\d,,,SUCCESS,Checking sources", result.stderr)
|
|
| 80 |
+ m = re.search(r"\d\d:\d\d:\d\d,\d\d:\d\d:\d\d.\d{6},\d\d:\d\d:\d\d,\s*,.*,SUCCESS,Checking sources", result.stderr)
|
|
| 81 | 81 |
assert(m is not None)
|
| 82 | 82 |
|
| 83 | 83 |
|
| ... | ... | @@ -101,7 +101,7 @@ def test_buildtree_from_failure(cli_integration, tmpdir, datafiles): |
| 101 | 101 |
'shell', '--build', element_name, '--use-buildtree', 'always', '--', 'cat', 'test'
|
| 102 | 102 |
])
|
| 103 | 103 |
res.assert_success()
|
| 104 |
- assert "Warning: using a buildtree from a failed build" in res.output
|
|
| 104 |
+ assert "WARNING: using a buildtree from a failed build" in res.stderr
|
|
| 105 | 105 |
assert 'Hi' in res.output
|
| 106 | 106 |
|
| 107 | 107 |
|
| ... | ... | @@ -141,7 +141,7 @@ def test_buildtree_pulled(cli, tmpdir, datafiles): |
| 141 | 141 |
res.assert_success()
|
| 142 | 142 |
|
| 143 | 143 |
|
| 144 |
-# This test checks for correct behaviour if a buildtree is not present.
|
|
| 144 |
+# This test checks for correct behaviour if a buildtree is not present in the local cache.
|
|
| 145 | 145 |
@pytest.mark.datafiles(DATA_DIR)
|
| 146 | 146 |
@pytest.mark.skipif(IS_LINUX and not HAVE_BWRAP, reason='Only available with bubblewrap on Linux')
|
| 147 | 147 |
def test_buildtree_options(cli, tmpdir, datafiles):
|
| ... | ... | @@ -156,6 +156,7 @@ def test_buildtree_options(cli, tmpdir, datafiles): |
| 156 | 156 |
result = cli.run(project=project, args=['build', element_name])
|
| 157 | 157 |
result.assert_success()
|
| 158 | 158 |
assert cli.get_element_state(project, element_name) == 'cached'
|
| 159 |
+ assert share.has_artifact('test', element_name, cli.get_element_key(project, element_name))
|
|
| 159 | 160 |
|
| 160 | 161 |
# Discard the cache
|
| 161 | 162 |
cli.configure({
|
| ... | ... | @@ -168,8 +169,6 @@ def test_buildtree_options(cli, tmpdir, datafiles): |
| 168 | 169 |
result = cli.run(project=project, args=['artifact', 'pull', '--deps', 'all', element_name])
|
| 169 | 170 |
result.assert_success()
|
| 170 | 171 |
|
| 171 |
- # The above is the simplest way I know to create a local cache without any buildtrees.
|
|
| 172 |
- |
|
| 173 | 172 |
# Check it's not using the cached build tree
|
| 174 | 173 |
res = cli.run(project=project, args=[
|
| 175 | 174 |
'shell', '--build', element_name, '--use-buildtree', 'never', '--', 'cat', 'test'
|
| ... | ... | @@ -177,24 +176,51 @@ def test_buildtree_options(cli, tmpdir, datafiles): |
| 177 | 176 |
res.assert_shell_error()
|
| 178 | 177 |
assert 'Hi' not in res.output
|
| 179 | 178 |
|
| 180 |
- # Check it's not correctly handling the lack of buildtree
|
|
| 179 |
+ # Check it's not using the cached build tree, default is to ask, and fall back to not
|
|
| 180 |
+ # for non interactive behavior
|
|
| 181 | 181 |
res = cli.run(project=project, args=[
|
| 182 |
- 'shell', '--build', element_name, '--use-buildtree', 'try', '--', 'cat', 'test'
|
|
| 182 |
+ 'shell', '--build', element_name, '--', 'cat', 'test'
|
|
| 183 | 183 |
])
|
| 184 | 184 |
res.assert_shell_error()
|
| 185 | 185 |
assert 'Hi' not in res.output
|
| 186 | 186 |
|
| 187 |
- # Check it's not using the cached build tree, default is to ask, and fall back to not
|
|
| 188 |
- # for non interactive behavior
|
|
| 187 |
+ # Check correctly handling the lack of buildtree, with 'try' not attempting to
|
|
| 188 |
+ # pull the buildtree as the user context is by default set to not pull them
|
|
| 189 | 189 |
res = cli.run(project=project, args=[
|
| 190 |
- 'shell', '--build', element_name, '--', 'cat', 'test'
|
|
| 190 |
+ 'shell', '--build', element_name, '--use-buildtree', 'try', '--', 'cat', 'test'
|
|
| 191 | 191 |
])
|
| 192 |
- res.assert_shell_error()
|
|
| 193 | 192 |
assert 'Hi' not in res.output
|
| 193 |
+ assert 'Attempting to fetch missing artifact buildtree' not in res.stderr
|
|
| 194 |
+ assert """Buildtree is not cached locally or in available remotes,
|
|
| 195 |
+ shell will be loaded without it"""
|
|
| 194 | 196 |
|
| 195 |
- # Check it's using the cached build tree
|
|
| 197 |
+ # Check correctly handling the lack of buildtree, with 'try' attempting and succeeding
|
|
| 198 |
+ # to pull the buildtree as the user context allow the pulling of buildtrees and it is
|
|
| 199 |
+ # available in the remote
|
|
| 200 |
+ res = cli.run(project=project, args=[
|
|
| 201 |
+ '--pull-buildtrees', 'shell', '--build', element_name, '--use-buildtree', 'try', '--', 'cat', 'test'
|
|
| 202 |
+ ])
|
|
| 203 |
+ assert 'Attempting to fetch missing artifact buildtree' in res.stderr
|
|
| 204 |
+ assert 'Hi' in res.output
|
|
| 205 |
+ shutil.rmtree(os.path.join(os.path.join(cli.directory, 'artifacts2')))
|
|
| 206 |
+ assert cli.get_element_state(project, element_name) != 'cached'
|
|
| 207 |
+ |
|
| 208 |
+ # Check it's not loading the shell at all with always set for the buildtree, when the
|
|
| 209 |
+ # user context does not allow for buildtree pulling
|
|
| 210 |
+ result = cli.run(project=project, args=['artifact', 'pull', '--deps', 'all', element_name])
|
|
| 211 |
+ result.assert_success()
|
|
| 196 | 212 |
res = cli.run(project=project, args=[
|
| 197 | 213 |
'shell', '--build', element_name, '--use-buildtree', 'always', '--', 'cat', 'test'
|
| 198 | 214 |
])
|
| 199 | 215 |
res.assert_main_error(ErrorDomain.PROG_NOT_FOUND, None)
|
| 216 |
+ assert 'Buildtree is not cached locally or in available remotes' in res.stderr
|
|
| 200 | 217 |
assert 'Hi' not in res.output
|
| 218 |
+ assert 'Attempting to fetch missing artifact buildtree' not in res.stderr
|
|
| 219 |
+ |
|
| 220 |
+ # Check that when user context is set to pull buildtrees and a remote has the buildtree,
|
|
| 221 |
+ # 'always' will attempt and succeed at pulling the missing buildtree.
|
|
| 222 |
+ res = cli.run(project=project, args=[
|
|
| 223 |
+ '--pull-buildtrees', 'shell', '--build', element_name, '--use-buildtree', 'always', '--', 'cat', 'test'
|
|
| 224 |
+ ])
|
|
| 225 |
+ assert 'Hi' in res.output
|
|
| 226 |
+ assert 'Attempting to fetch missing artifact buildtree' in res.stderr
|
