Tristan Van Berkom pushed to branch tristan/cache-management at BuildStream / buildstream
Commits:
- 979ee6c8 by Tristan Van Berkom at 2019-01-20T16:10:00Z
- e41f6f18 by Tristan Van Berkom at 2019-01-20T16:12:32Z
- edb4933e by Tristan Van Berkom at 2019-01-20T16:12:32Z
- 6c38435d by Tristan Van Berkom at 2019-01-20T16:12:32Z
- eb116357 by Tristan Van Berkom at 2019-01-20T16:12:32Z
- 1e64c557 by Tristan Van Berkom at 2019-01-20T17:38:22Z
- dd838893 by Tristan Van Berkom at 2019-01-20T17:52:16Z
- 6eb816ec by Tristan Van Berkom at 2019-01-20T19:36:51Z
- a29cc9f6 by Tristan Van Berkom at 2019-01-20T19:37:41Z
- ce18f167 by Tristan Van Berkom at 2019-01-20T19:39:12Z
8 changed files:
- buildstream/_artifactcache.py
- buildstream/_context.py
- buildstream/_frontend/widget.py
- buildstream/_scheduler/resources.py
- buildstream/_scheduler/scheduler.py
- buildstream/utils.py
- tests/artifactcache/cache_size.py
- tests/artifactcache/expiry.py
Changes:
--- a/buildstream/_artifactcache.py
+++ b/buildstream/_artifactcache.py
@@ -46,6 +46,30 @@ class ArtifactCacheSpec(CASRemoteSpec):
     pass
 
 
+# ArtifactCacheUsage
+#
+# A simple object to report the current artifact cache
+# usage details.
+#
+# Note that this uses the user configured cache quota
+# rather than the internal quota with protective headroom
+# removed, to provide a more sensible value to display to
+# the user.
+#
+# Args:
+#    artifacts (ArtifactCache): The artifact cache to get the status of
+#
+class ArtifactCacheUsage():
+
+    def __init__(self, artifacts):
+        context = artifacts.context
+        self.quota_config = context.config_cache_quota          # Configured quota
+        self.quota_size = artifacts._cache_quota_original       # Resolved cache quota in bytes
+        self.used_size = artifacts.get_cache_size()             # Size used by artifacts in bytes
+        self.used_percent = None                                 # Percentage of the quota used
+        self.used_percent = int(self.used_size * 100 / self.quota_size)
+
+
 # An ArtifactCache manages artifacts.
 #
 # Args:
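The used_percent value in the new ArtifactCacheUsage object is plain integer arithmetic; a quick illustration with hypothetical numbers (not taken from this push):

    # Hypothetical numbers, purely to illustrate the percentage calculation
    # performed by ArtifactCacheUsage.__init__() above.
    quota_size = 10 * 1024 ** 3            # a resolved quota of 10 GiB
    used_size = 2560 * 1024 ** 2           # 2.5 GiB of cached artifacts
    used_percent = int(used_size * 100 / quota_size)
    assert used_percent == 25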
@@ -64,6 +88,7 @@ class ArtifactCache():
         self._required_elements = set()       # The elements required for this session
         self._cache_size = None               # The current cache size, sometimes it's an estimate
         self._cache_quota = None              # The cache quota
+        self._cache_quota_original = None     # The cache quota as specified by the user, in bytes
         self._cache_lower_threshold = None    # The target cache size for a cleanup
         self._remotes_setup = False           # Check to prevent double-setup of remotes
 
@@ -307,7 +332,7 @@ class ArtifactCache():
     # it is greater than the actual cache size.
     #
     # Returns:
-    #     (int) An approximation of the artifact cache size.
+    #     (int) An approximation of the artifact cache size, in bytes.
     #
     def get_cache_size(self):
 
@@ -848,19 +873,16 @@ class ArtifactCache():
         else:
             headroom = 2e9
 
-        artifactdir_volume = self.context.artifactdir
-        while not os.path.exists(artifactdir_volume):
-            artifactdir_volume = os.path.dirname(artifactdir_volume)
-
         try:
-            cache_quota = utils._parse_size(self.context.config_cache_quota, artifactdir_volume)
+            cache_quota = utils._parse_size(self.context.config_cache_quota,
+                                            self.context.artifactdir)
         except utils.UtilError as e:
             raise LoadError(LoadErrorReason.INVALID_DATA,
                             "{}\nPlease specify the value in bytes or as a % of full disk space.\n"
                             "\nValid values are, for example: 800M 10G 1T 50%\n"
                             .format(str(e))) from e
 
-        available_space, total_size = self._get_volume_space_info_for(artifactdir_volume)
+        total_size, available_space = self._get_cache_volume_size()
         cache_size = self.get_cache_size()
 
         # Ensure system has enough storage for the cache_quota
@@ -882,16 +904,16 @@ class ArtifactCache():
             else:
                 available = utils._pretty_size(available_space)
 
-            raise LoadError(LoadErrorReason.INVALID_DATA,
-                            ("Your system does not have enough available " +
-                             "space to support the cache quota specified.\n" +
-                             "\nYou have specified a quota of {quota} total disk space.\n" +
-                             "The filesystem containing {local_cache_path} only " +
-                             "has: {available_size} available.")
-                            .format(
-                                quota=self.context.config_cache_quota,
-                                local_cache_path=self.context.artifactdir,
-                                available_size=available))
+            raise ArtifactError("Your system does not have enough available " +
+                                "space to support the cache quota specified.",
+                                detail=("You have specified a quota of {quota} total disk space.\n" +
+                                        "The filesystem containing {local_cache_path} only " +
+                                        "has {available_size} available.")
+                                .format(
+                                    quota=self.context.config_cache_quota,
+                                    local_cache_path=self.context.artifactdir,
+                                    available_size=available),
+                                reason='insufficient-storage-for-quota')
 
         # Place a slight headroom (2e9 (2GB) on the cache_quota) into
         # cache_quota to try and avoid exceptions.
@@ -900,22 +922,25 @@ class ArtifactCache():
         # if we end up writing more than 2G, but hey, this stuff is
         # already really fuzzy.
         #
+        self._cache_quota_original = cache_quota
         self._cache_quota = cache_quota - headroom
         self._cache_lower_threshold = self._cache_quota / 2
 
-    # _get_volume_space_info_for
+    # _get_cache_volume_size()
     #
-    # Get the available space and total space for the given volume
-    #
-    # Args:
-    #     volume: volume for which to get the size
+    # Get the available space and total space for the volume on
+    # which the artifact cache is located.
     #
     # Returns:
-    #     A tuple containing first the availabe number of bytes on the requested
-    #     volume, then the total number of bytes of the volume.
-    def _get_volume_space_info_for(self, volume):
-        stat = os.statvfs(volume)
-        return stat.f_bsize * stat.f_bavail, stat.f_bsize * stat.f_blocks
+    #     (int): The total number of bytes on the volume
+    #     (int): The number of available bytes on the volume
+    #
+    # NOTE: We use this stub to allow the test cases
+    #       to override what an artifact cache thinks
+    #       about it's disk size and available bytes.
+    #
+    def _get_cache_volume_size(self):
+        return utils._get_volume_size(self.context.artifactdir)
 
 
 # _configured_remote_artifact_cache_specs():
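To make the relationship between the three quota values tracked above concrete, here is a small arithmetic sketch with made-up numbers (the 2 GB headroom applies when the configured quota is not 'infinity'):

    # Made-up numbers illustrating _cache_quota_original, _cache_quota
    # and _cache_lower_threshold as computed above.
    cache_quota_original = 10e9                    # quota as the user configured it (reported to the frontend)
    headroom = 2e9                                 # protective headroom, 0 for an 'infinity' quota
    cache_quota = cache_quota_original - headroom  # internal quota used for cleanup decisions
    cache_lower_threshold = cache_quota / 2        # target size a cleanup job shrinks the cache to
    assert (cache_quota, cache_lower_threshold) == (8e9, 4e9)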
--- a/buildstream/_context.py
+++ b/buildstream/_context.py
@@ -30,7 +30,7 @@ from . import _yaml
 from ._exceptions import LoadError, LoadErrorReason, BstError
 from ._message import Message, MessageType
 from ._profile import Topics, profile_start, profile_end
-from ._artifactcache import ArtifactCache
+from ._artifactcache import ArtifactCache, ArtifactCacheUsage
 from ._cas import CASCache
 from ._workspaces import Workspaces, WorkspaceProjectCache, WORKSPACE_PROJECT_FILE
 from .plugin import _plugin_lookup
@@ -289,6 +289,16 @@ class Context():
 
         return self._artifactcache
 
+    # get_artifact_cache_usage()
+    #
+    # Fetches the current usage of the artifact cache
+    #
+    # Returns:
+    #     (ArtifactCacheUsage): The current status
+    #
+    def get_artifact_cache_usage(self):
+        return ArtifactCacheUsage(self.artifactcache)
+
     # add_project():
     #
     # Add a project to the context.
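A hedged sketch of how a caller might consume the new accessor; it assumes `context` is an already-loaded Context and `utils` refers to buildstream.utils. The LogLine widget change further down in this push builds its "Cache Usage" heading line the same way:

    # Sketch only, not a verbatim excerpt from this push.
    usage = context.get_artifact_cache_usage()
    print("Cache Usage: {} / {} ({}%)".format(
        utils._pretty_size(usage.used_size, dec_places=1),
        usage.quota_config,
        usage.used_percent))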
--- a/buildstream/_frontend/widget.py
+++ b/buildstream/_frontend/widget.py
@@ -33,6 +33,7 @@ from .. import __version__ as bst_version
 from .._exceptions import ImplError
 from .._message import MessageType
 from ..plugin import _plugin_lookup
+from .. import utils
 
 
 # These messages are printed a bit differently
@@ -178,26 +179,22 @@ class ElementName(Widget):
     def __init__(self, context, content_profile, format_profile):
         super(ElementName, self).__init__(context, content_profile, format_profile)
 
-        # Pre initialization format string, before we know the length of
-        # element names in the pipeline
-        self._fmt_string = '{: <30}'
-
     def render(self, message):
+        action_name = message.action_name
         element_id = message.task_id or message.unique_id
-        if element_id is None:
-            return ""
-
-        plugin = _plugin_lookup(element_id)
-        name = plugin._get_full_name()
+        if element_id is not None:
+            plugin = _plugin_lookup(element_id)
+            name = plugin._get_full_name()
+            name = '{: <30}'.format(name)
+        else:
+            name = 'core activity'
+            name = '{: <30}'.format(name)
 
-        # Sneak the action name in with the element name
-        action_name = message.action_name
         if not action_name:
             action_name = "Main"
 
         return self.content_profile.fmt("{: >5}".format(action_name.lower())) + \
-               self.format_profile.fmt(':') + \
-               self.content_profile.fmt(self._fmt_string.format(name))
+               self.format_profile.fmt(':') + self.content_profile.fmt(name)
 
 
 # A widget for displaying the primary message text
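The rendering change above relies on Python's fixed-width format spec to keep the name column aligned whether or not a plugin name is available; for example:

    # '{: <30}' left-aligns and pads to 30 characters, so 'core activity'
    # occupies the same column width as a real element name would.
    name = '{: <30}'.format('core activity')
    assert len(name) == 30
    assert name == 'core activity' + ' ' * 17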
@@ -219,9 +216,12 @@ class CacheKey(Widget):
     def render(self, message):
 
         element_id = message.task_id or message.unique_id
-        if element_id is None or not self._key_length:
+        if not self._key_length:
             return ""
 
+        if element_id is None:
+            return ' ' * self._key_length
+
         missing = False
         key = ' ' * self._key_length
         plugin = _plugin_lookup(element_id)
@@ -450,12 +450,16 @@ class LogLine(Widget):
         self._resolved_keys = {element: element._get_cache_key() for element in stream.session_elements}
 
         # Main invocation context
+        usage = context.get_artifact_cache_usage()
         text += '\n'
         text += self.content_profile.fmt("BuildStream Version {}\n".format(bst_version), bold=True)
         values = OrderedDict()
         values["Session Start"] = starttime.strftime('%A, %d-%m-%Y at %H:%M:%S')
         values["Project"] = "{} ({})".format(project.name, project.directory)
         values["Targets"] = ", ".join([t.name for t in stream.targets])
+        values["Cache Usage"] = "{} / {} ({}%)".format(
+            utils._pretty_size(usage.used_size, dec_places=1),
+            usage.quota_config, usage.used_percent)
         text += self._format_values(values)
 
         # User configurations
--- a/buildstream/_scheduler/resources.py
+++ b/buildstream/_scheduler/resources.py
@@ -163,4 +163,4 @@ class Resources():
     def unregister_exclusive_interest(self, resources, source):
 
         for resource in resources:
-            self._exclusive_resources[resource].remove(source)
+            self._exclusive_resources[resource].discard(source)
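The switch from set.remove() to set.discard() makes unregistering an exclusive interest safe even when the source never registered (or was already unregistered), which the scheduler changes below rely on:

    # set.remove() raises KeyError for a missing member, set.discard() does not.
    interested = {'cache-size'}
    interested.discard('cache-size')    # removes the entry
    interested.discard('cache-size')    # second call is a harmless no-op
    try:
        interested.remove('cache-size')
    except KeyError:
        pass                            # .remove() would have raised here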
--- a/buildstream/_scheduler/scheduler.py
+++ b/buildstream/_scheduler/scheduler.py
@@ -40,8 +40,8 @@ class SchedStatus():
 
 # Some action names for the internal jobs we launch
 #
-_ACTION_NAME_CLEANUP = 'cleanup'
-_ACTION_NAME_CACHE_SIZE = 'cache_size'
+_ACTION_NAME_CLEANUP = 'clean'
+_ACTION_NAME_CACHE_SIZE = 'size'
 
 
 # Scheduler()
@@ -151,6 +151,9 @@ class Scheduler():
         # Handle unix signals while running
         self._connect_signals()
 
+        # Check if we need to start with some cache maintenance
+        self._check_cache_management()
+
         # Run the queues
         self._sched()
         self.loop.run_forever()
@@ -272,6 +275,31 @@ class Scheduler():
     #                  Local Private Methods              #
     #######################################################
 
+    # _check_cache_management()
+    #
+    # Run an initial check if we need to lock the cache
+    # resource and check the size and possibly launch
+    # a cleanup.
+    #
+    # Sessions which do not add to the cache are not affected.
+    #
+    def _check_cache_management(self):
+
+        # Only trigger the check for a scheduler run which has
+        # queues which require the CACHE resource.
+        if not any(q for q in self.queues
+                   if ResourceType.CACHE in q.resources):
+            return
+
+        # If the estimated size outgrows the quota, queue a job to
+        # actually check the real cache size initially, this one
+        # should have exclusive access to the cache to ensure nothing
+        # starts while we are checking the cache.
+        #
+        artifacts = self.context.artifactcache
+        if artifacts.has_quota_exceeded():
+            self._sched_cache_size_job(exclusive=True)
+
     # _spawn_job()
     #
     # Spanws a job
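A toy illustration (not BuildStream code) of the guard at the top of _check_cache_management(): only sessions whose queues use the CACHE resource can trigger the startup size check, so fetch-only sessions are unaffected:

    from types import SimpleNamespace

    CACHE = 'cache'   # stand-in for ResourceType.CACHE
    fetch_queue = SimpleNamespace(resources=['download'])
    build_queue = SimpleNamespace(resources=['process', CACHE])

    def needs_cache_check(queues):
        return any(q for q in queues if CACHE in q.resources)

    assert not needs_cache_check([fetch_queue])
    assert needs_cache_check([fetch_queue, build_queue])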
@@ -292,6 +320,11 @@ class Scheduler():
         self._cache_size_running = None
         self.resources.release([ResourceType.CACHE, ResourceType.PROCESS])
 
+        # Unregister the exclusive interest if there was any
+        self.resources.unregister_exclusive_interest(
+            [ResourceType.CACHE], 'cache-size'
+        )
+
         # Schedule a cleanup job if we've hit the threshold
         if status != JobStatus.OK:
             return
@@ -344,11 +377,35 @@ class Scheduler():
     # Runs a cache size job if one is scheduled to run now and
     # sufficient recources are available.
     #
-    def _sched_cache_size_job(self):
+    # Args:
+    #    exclusive (bool): Run a cache size job immediately and
+    #                      hold the ResourceType.CACHE resource
+    #                      exclusively (used at startup).
+    #
+    def _sched_cache_size_job(self, *, exclusive=False):
+
+        # The exclusive argument is not intended (or safe) for arbitrary use.
+        if exclusive:
+            assert not self._cache_size_scheduled
+            assert not self._cache_size_running
+            assert not self._active_jobs
+            self._cache_size_scheduled = True
 
         if self._cache_size_scheduled and not self._cache_size_running:
 
-            if self.resources.reserve([ResourceType.CACHE, ResourceType.PROCESS]):
+            # Handle the exclusive launch
+            exclusive_resources = set()
+            if exclusive:
+                exclusive_resources.add(ResourceType.CACHE)
+                self.resources.register_exclusive_interest(
+                    exclusive_resources, 'cache-size'
+                )
+
+            # Reserve the resources (with the possible exclusive cache resource)
+            if self.resources.reserve([ResourceType.CACHE, ResourceType.PROCESS],
+                                      exclusive_resources):
+
+                # Update state and launch
                 self._cache_size_scheduled = False
                 self._cache_size_running = \
                     CacheSizeJob(self, _ACTION_NAME_CACHE_SIZE,
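The exclusive startup launch leans on the Resources exclusive-interest mechanism seen in this hunk. The following is a deliberately simplified, self-contained model of that idea (it is not the real Resources class, which also tracks job counts and resource limits), just to show why an exclusive 'cache-size' interest makes ordinary CACHE reservations wait:

    CACHE, PROCESS = 'cache', 'process'

    class ToyResources():
        def __init__(self):
            self._exclusive = {CACHE: set(), PROCESS: set()}

        def register_exclusive_interest(self, resources, source):
            for resource in resources:
                self._exclusive[resource].add(source)

        def unregister_exclusive_interest(self, resources, source):
            for resource in resources:
                self._exclusive[resource].discard(source)

        def reserve(self, resources, exclusive=frozenset()):
            # Refuse a non-exclusive reservation of any resource that
            # somebody else has registered an exclusive interest in.
            return all(resource in exclusive or not self._exclusive[resource]
                       for resource in resources)

    resources = ToyResources()
    resources.register_exclusive_interest({CACHE}, 'cache-size')
    assert resources.reserve([CACHE, PROCESS], {CACHE})    # startup size job may run
    assert not resources.reserve([CACHE, PROCESS])         # a build job has to wait
    resources.unregister_exclusive_interest([CACHE], 'cache-size')
    assert resources.reserve([CACHE, PROCESS])             # builds proceed afterwards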
--- a/buildstream/utils.py
+++ b/buildstream/utils.py
@@ -633,6 +633,27 @@ def _get_dir_size(path):
     return get_size(path)
 
 
+# _get_volume_size():
+#
+# Gets the overall usage and total size of a mounted filesystem in bytes.
+#
+# Args:
+#    path (str): The path to check
+#
+# Returns:
+#    (int): The total number of bytes on the volume
+#    (int): The number of available bytes on the volume
+#
+def _get_volume_size(path):
+    try:
+        stat_ = os.statvfs(path)
+    except OSError as e:
+        raise UtilError("Failed to retrieve stats on volume for path '{}': {}"
+                        .format(path, e)) from e
+
+    return stat_.f_bsize * stat_.f_blocks, stat_.f_bsize * stat_.f_bavail
+
+
 # _parse_size():
 #
 # Convert a string representing data size to a number of
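The arithmetic in the new _get_volume_size() helper is plain os.statvfs() bookkeeping; a standalone equivalent (POSIX only, '/' is just an example path) looks like this:

    import os

    # Same calculation as the helper above: block size times block counts
    # gives sizes in bytes.
    st = os.statvfs('/')
    total_bytes = st.f_bsize * st.f_blocks
    available_bytes = st.f_bsize * st.f_bavail
    assert total_bytes >= available_bytes >= 0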
@@ -667,8 +688,7 @@ def _parse_size(size, volume):
         if num > 100:
             raise UtilError("{}% is not a valid percentage value.".format(num))
 
-        stat_ = os.statvfs(volume)
-        disk_size = stat_.f_blocks * stat_.f_bsize
+        disk_size, _ = _get_volume_size(volume)
 
         return disk_size * (num / 100)
 
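A worked example of the percentage branch that now goes through _get_volume_size(): with a hypothetical 500 GB volume, a quota of "50%" resolves to 250 GB.

    # Hypothetical volume size; mirrors `disk_size * (num / 100)` above.
    disk_size = 500e9
    num = 50
    assert disk_size * (num / 100) == 250e9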
--- a/tests/artifactcache/cache_size.py
+++ b/tests/artifactcache/cache_size.py
@@ -4,6 +4,7 @@ from unittest import mock
 
 from buildstream import _yaml
 from buildstream._artifactcache import CACHE_SIZE_FILE
+from buildstream._exceptions import ErrorDomain
 
 from tests.testutils import cli, create_element_size
 
@@ -79,12 +80,11 @@ def test_quota_over_1024T(cli, tmpdir):
     _yaml.dump({'name': 'main'}, str(project.join("project.conf")))
 
     volume_space_patch = mock.patch(
-        "buildstream._artifactcache.ArtifactCache._get_volume_space_info_for",
+        "buildstream._artifactcache.ArtifactCache._get_cache_volume_size",
         autospec=True,
         return_value=(1025 * TiB, 1025 * TiB)
     )
 
     with volume_space_patch:
         result = cli.run(project, args=["build", "file.bst"])
-    failure_msg = 'Your system does not have enough available space to support the cache quota specified.'
-    assert failure_msg in result.stderr
+    result.assert_main_error(ErrorDomain.ARTIFACT, 'insufficient-storage-for-quota')
--- a/tests/artifactcache/expiry.py
+++ b/tests/artifactcache/expiry.py
@@ -18,6 +18,7 @@
 #
 
 import os
+import re
 from unittest import mock
 
 import pytest
@@ -304,20 +305,28 @@ def test_never_delete_required_track(cli, datafiles, tmpdir):
 
 # Ensure that only valid cache quotas make it through the loading
 # process.
-@pytest.mark.parametrize("quota,success", [
-    ("1", True),
-    ("1K", True),
-    ("50%", True),
-    ("infinity", True),
-    ("0", True),
-    ("-1", False),
-    ("pony", False),
-    ("7K", False),
-    ("70%", False),
-    ("200%", False)
+#
+# This test virtualizes the condition to assume a storage volume
+# has 10K total disk space, and 6K of it is already in use (not
+# including any space used by the artifact cache).
+#
+@pytest.mark.parametrize("quota,err_domain,err_reason", [
+    # Valid configurations
+    ("1", 'success', None),
+    ("1K", 'success', None),
+    ("50%", 'success', None),
+    ("infinity", 'success', None),
+    ("0", 'success', None),
+    # Invalid configurations
+    ("-1", ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA),
+    ("pony", ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA),
+    ("200%", ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA),
+    # Not enough space for these caches
+    ("7K", ErrorDomain.ARTIFACT, 'insufficient-storage-for-quota'),
+    ("70%", ErrorDomain.ARTIFACT, 'insufficient-storage-for-quota')
 ])
 @pytest.mark.datafiles(DATA_DIR)
-def test_invalid_cache_quota(cli, datafiles, tmpdir, quota, success):
+def test_invalid_cache_quota(cli, datafiles, tmpdir, quota, err_domain, err_reason):
     project = os.path.join(datafiles.dirname, datafiles.basename)
     os.makedirs(os.path.join(project, 'elements'))
 
@@ -342,9 +351,9 @@ def test_invalid_cache_quota(cli, datafiles, tmpdir, quota, success):
     total_space = 10000
 
     volume_space_patch = mock.patch(
-        "buildstream._artifactcache.ArtifactCache._get_volume_space_info_for",
+        "buildstream._artifactcache.ArtifactCache._get_cache_volume_size",
         autospec=True,
-        return_value=(free_space, total_space),
+        return_value=(total_space, free_space),
     )
 
     cache_size_patch = mock.patch(
@@ -356,10 +365,10 @@ def test_invalid_cache_quota(cli, datafiles, tmpdir, quota, success):
     with volume_space_patch, cache_size_patch:
         res = cli.run(project=project, args=['workspace', 'list'])
 
-    if success:
+    if err_domain == 'success':
         res.assert_success()
     else:
-        res.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
+        res.assert_main_error(err_domain, err_reason)
 
 
 @pytest.mark.datafiles(DATA_DIR)
@@ -409,3 +418,65 @@ def test_extract_expiry(cli, datafiles, tmpdir):
 
     assert os.path.isdir(refsdirtarget2)
     assert not os.path.exists(refsdirtarget)
+
+
+# Ensures that when launching BuildStream with a full artifact cache,
+# the cache size and cleanup jobs are run before any other jobs.
+#
+@pytest.mark.datafiles(DATA_DIR)
+def test_cleanup_first(cli, datafiles, tmpdir):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    element_path = 'elements'
+    cache_location = os.path.join(project, 'cache', 'artifacts', 'ostree')
+    checkout = os.path.join(project, 'checkout')
+
+    cli.configure({
+        'cache': {
+            'quota': 10000000,
+        }
+    })
+
+    # Create an element that uses almost the entire cache (an empty
+    # ostree cache starts at about ~10KiB, so we need a bit of a
+    # buffer)
+    create_element_size('target.bst', project, element_path, [], 8000000)
+    res = cli.run(project=project, args=['build', 'target.bst'])
+    res.assert_success()
+
+    assert cli.get_element_state(project, 'target.bst') == 'cached'
+
+    # Now configure with a smaller quota, create a situation
+    # where the cache must be cleaned up before building anything else.
+    #
+    # Fix the fetchers and builders just to ensure a predictable
+    # sequence of events (although it does not effect this test)
+    cli.configure({
+        'cache': {
+            'quota': 5000000,
+        },
+        'scheduler': {
+            'fetchers': 1,
+            'builders': 1
+        }
+    })
+
+    # Our cache is now more than full, BuildStream
+    create_element_size('target2.bst', project, element_path, [], 4000000)
+    res = cli.run(project=project, args=['build', 'target2.bst'])
+    res.assert_success()
+
+    # Find all of the activity (like push, pull, fetch) lines
+    results = re.findall(r'\[.*\]\[.*\]\[\s*(\S+):.*\]\s*START\s*.*\.log', res.stderr)
+
+    # Don't bother checking the order of 'fetch', it is allowed to start
+    # before or after the initial cache size job, runs in parallel, and does
+    # not require ResourceType.CACHE.
+    results.remove('fetch')
+    print(results)
+
+    # Assert the expected sequence of events
+    assert results == ['size', 'clean', 'build']
+
+    # Check that the correct element remains in the cache
+    assert cli.get_element_state(project, 'target.bst') != 'cached'
+    assert cli.get_element_state(project, 'target2.bst') == 'cached'
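The log scraping in test_cleanup_first() above pulls the action name out of each START line in the session output; a synthetic example (the log line below is invented for illustration, not verbatim BuildStream output):

    import re

    line = "[00:00:00][0123abcd][ build:target2.bst ] START   target2.bst/build.log"
    pattern = r'\[.*\]\[.*\]\[\s*(\S+):.*\]\s*START\s*.*\.log'
    assert re.findall(pattern, line) == ['build']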