Tristan Van Berkom pushed to branch tristan/cache-management at BuildStream / buildstream
Commits:
- 3315b9a1 by Tristan Van Berkom at 2019-01-22T18:46:24Z
- 56e857f4 by Tristan Van Berkom at 2019-01-22T19:59:57Z
- 221985a5 by Tristan Van Berkom at 2019-01-22T20:00:35Z
- 708cd241 by Tristan Van Berkom at 2019-01-22T20:00:35Z
- 3e80bb6c by Tristan Van Berkom at 2019-01-22T20:00:35Z
- 3f4a68d5 by Tristan Van Berkom at 2019-01-22T20:00:35Z
- e8546e7e by Tristan Van Berkom at 2019-01-22T20:00:35Z
- 2340ff99 by Tristan Van Berkom at 2019-01-22T20:00:35Z
- 2fc90358 by Tristan Van Berkom at 2019-01-22T20:00:35Z
- 6eaefa5d by Tristan Van Berkom at 2019-01-22T20:00:35Z
- ac58539d by Tristan Van Berkom at 2019-01-22T20:00:35Z
- 7844cb24 by Tristan Van Berkom at 2019-01-22T20:00:36Z
- c1dbc2e1 by Tristan Van Berkom at 2019-01-22T20:00:36Z
- 23028c29 by Tristan Van Berkom at 2019-01-22T20:00:36Z
- 06bcda04 by Tristan Van Berkom at 2019-01-22T20:00:36Z
- f8015b38 by Tristan Van Berkom at 2019-01-22T20:00:36Z
- f53b9cc3 by Tristan Van Berkom at 2019-01-22T20:00:36Z
- 45e3a8fd by Tristan Van Berkom at 2019-01-22T20:00:36Z
- f4818834 by Tristan Van Berkom at 2019-01-22T20:00:36Z
- 8c150b5d by Tristan Van Berkom at 2019-01-22T20:00:36Z
- 7613777f by Tristan Van Berkom at 2019-01-22T20:00:36Z
17 changed files:
- buildstream/_artifactcache.py
- buildstream/_context.py
- buildstream/_frontend/app.py
- buildstream/_frontend/status.py
- buildstream/_frontend/widget.py
- buildstream/_scheduler/jobs/cleanupjob.py
- buildstream/_scheduler/jobs/job.py
- buildstream/_scheduler/resources.py
- buildstream/_scheduler/scheduler.py
- buildstream/utils.py
- tests/artifactcache/cache_size.py
- tests/artifactcache/expiry.py
- tests/frontend/logging.py
- tests/frontend/track.py
- tests/integration/pullbuildtrees.py
- tests/internals/utils.py (deleted)
- tests/testutils/runcli.py
Changes:
@@ -46,6 +46,39 @@ class ArtifactCacheSpec(CASRemoteSpec):
     pass
 
 
+# ArtifactCacheUsage
+#
+# A simple object to report the current artifact cache
+# usage details.
+#
+# Note that this uses the user configured cache quota
+# rather than the internal quota with protective headroom
+# removed, to provide a more sensible value to display to
+# the user.
+#
+# Args:
+#    artifacts (ArtifactCache): The artifact cache to get the status of
+#
+class ArtifactCacheUsage():
+
+    def __init__(self, artifacts):
+        context = artifacts.context
+        self.quota_config = context.config_cache_quota       # Configured quota
+        self.quota_size = artifacts._cache_quota_original    # Resolved cache quota in bytes
+        self.used_size = artifacts.get_cache_size()          # Size used by artifacts in bytes
+        self.used_percent = 0                                # Percentage of the quota used
+        if self.quota_size is not None:
+            self.used_percent = int(self.used_size * 100 / self.quota_size)
+
+    # Formattable into a human readable string
+    #
+    def __str__(self):
+        return "{} / {} ({}%)" \
+            .format(utils._pretty_size(self.used_size, dec_places=1),
+                    self.quota_config,
+                    self.used_percent)
+
+
 # An ArtifactCache manages artifacts.
 #
 # Args:
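
Note: the used_percent figure above is plain integer arithmetic on byte counts. A minimal standalone sketch of the same formatting logic (the byte values and the inline pretty-printing are illustrative stand-ins, not BuildStream API calls):

    # Sketch only: mirrors the ArtifactCacheUsage report with hypothetical numbers
    def format_usage(used_size, quota_size, quota_config):
        used_percent = 0
        if quota_size is not None:
            used_percent = int(used_size * 100 / quota_size)  # truncated integer percentage
        return "{:.1f}G / {} ({}%)".format(used_size / 1e9, quota_config, used_percent)

    print(format_usage(13_400_000_000, 21_474_836_480, "20G"))  # -> 13.4G / 20G (62%)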
... | ... | @@ -64,6 +97,7 @@ class ArtifactCache(): |
64 | 97 |
self._required_elements = set() # The elements required for this session
|
65 | 98 |
self._cache_size = None # The current cache size, sometimes it's an estimate
|
66 | 99 |
self._cache_quota = None # The cache quota
|
100 |
+ self._cache_quota_original = None # The cache quota as specified by the user, in bytes
|
|
67 | 101 |
self._cache_lower_threshold = None # The target cache size for a cleanup
|
68 | 102 |
self._remotes_setup = False # Check to prevent double-setup of remotes
|
69 | 103 |
|
... | ... | @@ -216,11 +250,33 @@ class ArtifactCache(): |
216 | 250 |
#
|
217 | 251 |
# Clean the artifact cache as much as possible.
|
218 | 252 |
#
|
253 |
+ # Args:
|
|
254 |
+ # progress (callable): A callback to call when a ref is removed
|
|
255 |
+ #
|
|
219 | 256 |
# Returns:
|
220 | 257 |
# (int): The size of the cache after having cleaned up
|
221 | 258 |
#
|
222 |
- def clean(self):
|
|
259 |
+ def clean(self, progress=None):
|
|
223 | 260 |
artifacts = self.list_artifacts()
|
261 |
+ context = self.context
|
|
262 |
+ |
|
263 |
+ # Some accumulative statistics
|
|
264 |
+ removed_ref_count = 0
|
|
265 |
+ space_saved = 0
|
|
266 |
+ |
|
267 |
+ # Start off with an announcement with as much info as possible
|
|
268 |
+ volume_size, volume_avail = self._get_cache_volume_size()
|
|
269 |
+ self._message(MessageType.STATUS, "Starting cache cleanup",
|
|
270 |
+ detail=("Elements required by the current build plan: {}\n" +
|
|
271 |
+ "User specified quota: {} ({})\n" +
|
|
272 |
+ "Cache usage: {}\n" +
|
|
273 |
+ "Cache volume: {} total, {} available")
|
|
274 |
+ .format(len(self._required_elements),
|
|
275 |
+ context.config_cache_quota,
|
|
276 |
+ utils._pretty_size(self._cache_quota_original, dec_places=2),
|
|
277 |
+ utils._pretty_size(self.get_cache_size(), dec_places=2),
|
|
278 |
+ utils._pretty_size(volume_size, dec_places=2),
|
|
279 |
+ utils._pretty_size(volume_avail, dec_places=2)))
|
|
224 | 280 |
|
225 | 281 |
# Build a set of the cache keys which are required
|
226 | 282 |
# based on the required elements at cleanup time
|
... | ... | @@ -245,11 +301,15 @@ class ArtifactCache(): |
245 | 301 |
# can't remove them, we have to abort the build.
|
246 | 302 |
#
|
247 | 303 |
# FIXME: Asking the user what to do may be neater
|
304 |
+ #
|
|
248 | 305 |
default_conf = os.path.join(os.environ['XDG_CONFIG_HOME'],
|
249 | 306 |
'buildstream.conf')
|
250 |
- detail = ("There is not enough space to complete the build.\n"
|
|
251 |
- "Please increase the cache-quota in {}."
|
|
252 |
- .format(self.context.config_origin or default_conf))
|
|
307 |
+ detail = ("Aborted after removing {} refs and saving {} disk space.\n\n"
|
|
308 |
+ "There is not enough space to complete the build.\n"
|
|
309 |
+ "Please increase the cache-quota in {} and/or make more disk space."
|
|
310 |
+ .format(removed_ref_count,
|
|
311 |
+ utils._pretty_size(space_saved, dec_places=2),
|
|
312 |
+ (context.config_origin or default_conf)))
|
|
253 | 313 |
|
254 | 314 |
if self.has_quota_exceeded():
|
255 | 315 |
raise ArtifactError("Cache too full. Aborting.",
|
... | ... | @@ -264,10 +324,28 @@ class ArtifactCache(): |
264 | 324 |
# Remove the actual artifact, if it's not required.
|
265 | 325 |
size = self.remove(to_remove)
|
266 | 326 |
|
327 |
+ removed_ref_count += 1
|
|
328 |
+ space_saved += size
|
|
329 |
+ |
|
267 | 330 |
# Remove the size from the removed size
|
268 | 331 |
self.set_cache_size(self._cache_size - size)
|
269 | 332 |
|
270 |
- # This should be O(1) if implemented correctly
|
|
333 |
+ # User callback
|
|
334 |
+ #
|
|
335 |
+ # Currently this process is fairly slow, but we should
|
|
336 |
+ # think about throttling this progress() callback if this
|
|
337 |
+ # becomes too intense.
|
|
338 |
+ if progress:
|
|
339 |
+ progress()
|
|
340 |
+ |
|
341 |
+ # Informational message about the side effects of the cleanup
|
|
342 |
+ self._message(MessageType.INFO, "Cleanup completed",
|
|
343 |
+ detail=("Removed {} refs and saving {} disk space.\n" +
|
|
344 |
+ "Cache usage is now: {}")
|
|
345 |
+ .format(removed_ref_count,
|
|
346 |
+ utils._pretty_size(space_saved, dec_places=2),
|
|
347 |
+ utils._pretty_size(self.get_cache_size(), dec_places=2)))
|
|
348 |
+ |
|
271 | 349 |
return self.get_cache_size()
|
272 | 350 |
|
273 | 351 |
# compute_cache_size()
|
... | ... | @@ -279,7 +357,14 @@ class ArtifactCache(): |
279 | 357 |
# (int): The size of the artifact cache.
|
280 | 358 |
#
|
281 | 359 |
def compute_cache_size(self):
|
282 |
- self._cache_size = self.cas.calculate_cache_size()
|
|
360 |
+ old_cache_size = self._cache_size
|
|
361 |
+ new_cache_size = self.cas.calculate_cache_size()
|
|
362 |
+ |
|
363 |
+ if old_cache_size != new_cache_size:
|
|
364 |
+ self._cache_size = new_cache_size
|
|
365 |
+ |
|
366 |
+ usage = ArtifactCacheUsage(self)
|
|
367 |
+ self._message(MessageType.STATUS, "Cache usage recomputed: {}".format(usage))
|
|
283 | 368 |
|
284 | 369 |
return self._cache_size
|
285 | 370 |
|
... | ... | @@ -307,7 +392,7 @@ class ArtifactCache(): |
307 | 392 |
# it is greater than the actual cache size.
|
308 | 393 |
#
|
309 | 394 |
# Returns:
|
310 |
- # (int) An approximation of the artifact cache size.
|
|
395 |
+ # (int) An approximation of the artifact cache size, in bytes.
|
|
311 | 396 |
#
|
312 | 397 |
def get_cache_size(self):
|
313 | 398 |
|
... | ... | @@ -848,19 +933,16 @@ class ArtifactCache(): |
848 | 933 |
else:
|
849 | 934 |
headroom = 2e9
|
850 | 935 |
|
851 |
- artifactdir_volume = self.context.artifactdir
|
|
852 |
- while not os.path.exists(artifactdir_volume):
|
|
853 |
- artifactdir_volume = os.path.dirname(artifactdir_volume)
|
|
854 |
- |
|
855 | 936 |
try:
|
856 |
- cache_quota = utils._parse_size(self.context.config_cache_quota, artifactdir_volume)
|
|
937 |
+ cache_quota = utils._parse_size(self.context.config_cache_quota,
|
|
938 |
+ self.context.artifactdir)
|
|
857 | 939 |
except utils.UtilError as e:
|
858 | 940 |
raise LoadError(LoadErrorReason.INVALID_DATA,
|
859 | 941 |
"{}\nPlease specify the value in bytes or as a % of full disk space.\n"
|
860 | 942 |
"\nValid values are, for example: 800M 10G 1T 50%\n"
|
861 | 943 |
.format(str(e))) from e
|
862 | 944 |
|
863 |
- available_space, total_size = self._get_volume_space_info_for(artifactdir_volume)
|
|
945 |
+ total_size, available_space = self._get_cache_volume_size()
|
|
864 | 946 |
cache_size = self.get_cache_size()
|
865 | 947 |
|
866 | 948 |
# Ensure system has enough storage for the cache_quota
|
... | ... | @@ -882,16 +964,16 @@ class ArtifactCache(): |
882 | 964 |
else:
|
883 | 965 |
available = utils._pretty_size(available_space)
|
884 | 966 |
|
885 |
- raise LoadError(LoadErrorReason.INVALID_DATA,
|
|
886 |
- ("Your system does not have enough available " +
|
|
887 |
- "space to support the cache quota specified.\n" +
|
|
888 |
- "\nYou have specified a quota of {quota} total disk space.\n" +
|
|
889 |
- "- The filesystem containing {local_cache_path} only " +
|
|
890 |
- "has: {available_size} available.")
|
|
891 |
- .format(
|
|
892 |
- quota=self.context.config_cache_quota,
|
|
893 |
- local_cache_path=self.context.artifactdir,
|
|
894 |
- available_size=available))
|
|
967 |
+ raise ArtifactError("Your system does not have enough available " +
|
|
968 |
+ "space to support the cache quota specified.",
|
|
969 |
+ detail=("You have specified a quota of {quota} total disk space.\n" +
|
|
970 |
+ "The filesystem containing {local_cache_path} only " +
|
|
971 |
+ "has {available_size} available.")
|
|
972 |
+ .format(
|
|
973 |
+ quota=self.context.config_cache_quota,
|
|
974 |
+ local_cache_path=self.context.artifactdir,
|
|
975 |
+ available_size=available),
|
|
976 |
+ reason='insufficient-storage-for-quota')
|
|
895 | 977 |
|
896 | 978 |
# Place a slight headroom (2e9 (2GB) on the cache_quota) into
|
897 | 979 |
# cache_quota to try and avoid exceptions.
|
@@ -900,22 +982,25 @@ class ArtifactCache():
         # if we end up writing more than 2G, but hey, this stuff is
         # already really fuzzy.
         #
+        self._cache_quota_original = cache_quota
         self._cache_quota = cache_quota - headroom
         self._cache_lower_threshold = self._cache_quota / 2
 
-    # _get_volume_space_info_for
-    #
-    # Get the available space and total space for the given volume
+    # _get_cache_volume_size()
     #
-    # Args:
-    #    volume: volume for which to get the size
+    # Get the available space and total space for the volume on
+    # which the artifact cache is located.
     #
     # Returns:
-    #    A tuple containing first the availabe number of bytes on the requested
-    #    volume, then the total number of bytes of the volume.
-    def _get_volume_space_info_for(self, volume):
-        stat = os.statvfs(volume)
-        return stat.f_bsize * stat.f_bavail, stat.f_bsize * stat.f_blocks
+    #    (int): The total number of bytes on the volume
+    #    (int): The number of available bytes on the volume
+    #
+    # NOTE: We use this stub to allow the test cases
+    #       to override what an artifact cache thinks
+    #       about its disk size and available bytes.
+    #
+    def _get_cache_volume_size(self):
+        return utils._get_volume_size(self.context.artifactdir)
 
 
     # _configured_remote_artifact_cache_specs():
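
Note: the quota preflight earlier in this file checks the resolved quota against what the cache volume can actually hold, raising ArtifactError with reason 'insufficient-storage-for-quota' when it cannot. A rough standalone sketch of that idea, with made-up byte counts (an approximation of the check, not the exact implementation):

    # Approximation only: a quota can only be honoured if the artifacts already
    # cached plus the remaining free space on the volume can cover it.
    def quota_fits(cache_quota, cache_size, available_space):
        return cache_quota <= cache_size + available_space

    print(quota_fits(10e9, cache_size=2e9, available_space=5e9))  # False: quota too large
    print(quota_fits(6e9, cache_size=2e9, available_space=5e9))   # True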
... | ... | @@ -30,7 +30,7 @@ from . import _yaml |
30 | 30 |
from ._exceptions import LoadError, LoadErrorReason, BstError
|
31 | 31 |
from ._message import Message, MessageType
|
32 | 32 |
from ._profile import Topics, profile_start, profile_end
|
33 |
-from ._artifactcache import ArtifactCache
|
|
33 |
+from ._artifactcache import ArtifactCache, ArtifactCacheUsage
|
|
34 | 34 |
from ._cas import CASCache
|
35 | 35 |
from ._workspaces import Workspaces, WorkspaceProjectCache, WORKSPACE_PROJECT_FILE
|
36 | 36 |
from .plugin import _plugin_lookup
|
@@ -289,6 +289,16 @@ class Context():
 
         return self._artifactcache
 
+    # get_artifact_cache_usage()
+    #
+    # Fetches the current usage of the artifact cache
+    #
+    # Returns:
+    #    (ArtifactCacheUsage): The current status
+    #
+    def get_artifact_cache_usage(self):
+        return ArtifactCacheUsage(self.artifactcache)
+
     # add_project():
     #
     # Add a project to the context.
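
Note: this accessor lets frontend code take a usage snapshot without reaching into ArtifactCache internals. A short, hypothetical call site ('context' is assumed to be an initialised Context; the 95% threshold mirrors the status-bar colouring added below):

    usage = context.get_artifact_cache_usage()
    print("Cache usage: {}".format(usage))        # e.g. "13.4G / 20G (62%)"
    if usage.used_percent >= 95:
        print("Consider raising the configured cache quota")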
... | ... | @@ -194,11 +194,6 @@ class App(): |
194 | 194 |
except BstError as e:
|
195 | 195 |
self._error_exit(e, "Error instantiating platform")
|
196 | 196 |
|
197 |
- try:
|
|
198 |
- self.context.artifactcache.preflight()
|
|
199 |
- except BstError as e:
|
|
200 |
- self._error_exit(e, "Error instantiating artifact cache")
|
|
201 |
- |
|
202 | 197 |
# Create the logger right before setting the message handler
|
203 | 198 |
self.logger = LogLine(self.context,
|
204 | 199 |
self._content_profile,
|
... | ... | @@ -211,6 +206,13 @@ class App(): |
211 | 206 |
# Propagate pipeline feedback to the user
|
212 | 207 |
self.context.set_message_handler(self._message_handler)
|
213 | 208 |
|
209 |
+ # Preflight the artifact cache after initializing logging,
|
|
210 |
+ # this can cause messages to be emitted.
|
|
211 |
+ try:
|
|
212 |
+ self.context.artifactcache.preflight()
|
|
213 |
+ except BstError as e:
|
|
214 |
+ self._error_exit(e, "Error instantiating artifact cache")
|
|
215 |
+ |
|
214 | 216 |
#
|
215 | 217 |
# Load the Project
|
216 | 218 |
#
|
... | ... | @@ -353,13 +353,17 @@ class _StatusHeader(): |
353 | 353 |
def render(self, line_length, elapsed):
|
354 | 354 |
project = self._context.get_toplevel_project()
|
355 | 355 |
line_length = max(line_length, 80)
|
356 |
- size = 0
|
|
357 |
- text = ''
|
|
358 | 356 |
|
357 |
+ #
|
|
358 |
+ # Line 1: Session time, project name, session / total elements
|
|
359 |
+ #
|
|
360 |
+ # ========= 00:00:00 project-name (143/387) =========
|
|
361 |
+ #
|
|
359 | 362 |
session = str(len(self._stream.session_elements))
|
360 | 363 |
total = str(len(self._stream.total_elements))
|
361 | 364 |
|
362 |
- # Format and calculate size for target and overall time code
|
|
365 |
+ size = 0
|
|
366 |
+ text = ''
|
|
363 | 367 |
size += len(total) + len(session) + 4 # Size for (N/N) with a leading space
|
364 | 368 |
size += 8 # Size of time code
|
365 | 369 |
size += len(project.name) + 1
|
... | ... | @@ -372,6 +376,12 @@ class _StatusHeader(): |
372 | 376 |
self._format_profile.fmt(')')
|
373 | 377 |
|
374 | 378 |
line1 = self._centered(text, size, line_length, '=')
|
379 |
+ |
|
380 |
+ #
|
|
381 |
+ # Line 2: Dynamic list of queue status reports
|
|
382 |
+ #
|
|
383 |
+ # (Fetched:0 117 0)→ (Built:4 0 0)
|
|
384 |
+ #
|
|
375 | 385 |
size = 0
|
376 | 386 |
text = ''
|
377 | 387 |
|
... | ... | @@ -389,10 +399,28 @@ class _StatusHeader(): |
389 | 399 |
|
390 | 400 |
line2 = self._centered(text, size, line_length, ' ')
|
391 | 401 |
|
392 |
- size = 24
|
|
393 |
- text = self._format_profile.fmt("~~~~~ ") + \
|
|
394 |
- self._content_profile.fmt('Active Tasks') + \
|
|
395 |
- self._format_profile.fmt(" ~~~~~")
|
|
402 |
+ #
|
|
403 |
+ # Line 3: Cache usage percentage report
|
|
404 |
+ #
|
|
405 |
+ # ~~~~~~ cache: 69% ~~~~~~
|
|
406 |
+ #
|
|
407 |
+ usage = self._context.get_artifact_cache_usage()
|
|
408 |
+ usage_percent = '{}%'.format(usage.used_percent)
|
|
409 |
+ |
|
410 |
+ size = 21
|
|
411 |
+ size += len(usage_percent)
|
|
412 |
+ if usage.used_percent >= 95:
|
|
413 |
+ formatted_usage_percent = self._error_profile.fmt(usage_percent)
|
|
414 |
+ elif usage.used_percent >= 80:
|
|
415 |
+ formatted_usage_percent = self._content_profile.fmt(usage_percent)
|
|
416 |
+ else:
|
|
417 |
+ formatted_usage_percent = self._success_profile.fmt(usage_percent)
|
|
418 |
+ |
|
419 |
+ text = self._format_profile.fmt("~~~~~~ ") + \
|
|
420 |
+ self._content_profile.fmt('cache') + \
|
|
421 |
+ self._format_profile.fmt(': ') + \
|
|
422 |
+ formatted_usage_percent + \
|
|
423 |
+ self._format_profile.fmt(' ~~~~~~')
|
|
396 | 424 |
line3 = self._centered(text, size, line_length, ' ')
|
397 | 425 |
|
398 | 426 |
return line1 + '\n' + line2 + '\n' + line3
|
... | ... | @@ -175,29 +175,22 @@ class TypeName(Widget): |
175 | 175 |
# A widget for displaying the Element name
|
176 | 176 |
class ElementName(Widget):
|
177 | 177 |
|
178 |
- def __init__(self, context, content_profile, format_profile):
|
|
179 |
- super(ElementName, self).__init__(context, content_profile, format_profile)
|
|
180 |
- |
|
181 |
- # Pre initialization format string, before we know the length of
|
|
182 |
- # element names in the pipeline
|
|
183 |
- self._fmt_string = '{: <30}'
|
|
184 |
- |
|
185 | 178 |
def render(self, message):
|
179 |
+ action_name = message.action_name
|
|
186 | 180 |
element_id = message.task_id or message.unique_id
|
187 |
- if element_id is None:
|
|
188 |
- return ""
|
|
189 |
- |
|
190 |
- plugin = _plugin_lookup(element_id)
|
|
191 |
- name = plugin._get_full_name()
|
|
181 |
+ if element_id is not None:
|
|
182 |
+ plugin = _plugin_lookup(element_id)
|
|
183 |
+ name = plugin._get_full_name()
|
|
184 |
+ name = '{: <30}'.format(name)
|
|
185 |
+ else:
|
|
186 |
+ name = 'core activity'
|
|
187 |
+ name = '{: <30}'.format(name)
|
|
192 | 188 |
|
193 |
- # Sneak the action name in with the element name
|
|
194 |
- action_name = message.action_name
|
|
195 | 189 |
if not action_name:
|
196 | 190 |
action_name = "Main"
|
197 | 191 |
|
198 | 192 |
return self.content_profile.fmt("{: >5}".format(action_name.lower())) + \
|
199 |
- self.format_profile.fmt(':') + \
|
|
200 |
- self.content_profile.fmt(self._fmt_string.format(name))
|
|
193 |
+ self.format_profile.fmt(':') + self.content_profile.fmt(name)
|
|
201 | 194 |
|
202 | 195 |
|
203 | 196 |
# A widget for displaying the primary message text
|
... | ... | @@ -219,9 +212,12 @@ class CacheKey(Widget): |
219 | 212 |
def render(self, message):
|
220 | 213 |
|
221 | 214 |
element_id = message.task_id or message.unique_id
|
222 |
- if element_id is None or not self._key_length:
|
|
215 |
+ if not self._key_length:
|
|
223 | 216 |
return ""
|
224 | 217 |
|
218 |
+ if element_id is None:
|
|
219 |
+ return ' ' * self._key_length
|
|
220 |
+ |
|
225 | 221 |
missing = False
|
226 | 222 |
key = ' ' * self._key_length
|
227 | 223 |
plugin = _plugin_lookup(element_id)
|
... | ... | @@ -456,6 +452,7 @@ class LogLine(Widget): |
456 | 452 |
values["Session Start"] = starttime.strftime('%A, %d-%m-%Y at %H:%M:%S')
|
457 | 453 |
values["Project"] = "{} ({})".format(project.name, project.directory)
|
458 | 454 |
values["Targets"] = ", ".join([t.name for t in stream.targets])
|
455 |
+ values["Cache Usage"] = "{}".format(context.get_artifact_cache_usage())
|
|
459 | 456 |
text += self._format_values(values)
|
460 | 457 |
|
461 | 458 |
# User configurations
|
@@ -28,7 +28,20 @@ class CleanupJob(Job):
         self._artifacts = context.artifactcache
 
     def child_process(self):
-        return self._artifacts.clean()
+        def progress():
+            self.send_message('update-cache-size',
+                              self._artifacts.get_cache_size())
+        return self._artifacts.clean(progress)
+
+    def handle_message(self, message_type, message):
+
+        # Update the cache size in the main process as we go,
+        # this provides better feedback in the UI.
+        if message_type == 'update-cache-size':
+            self._artifacts.set_cache_size(message)
+            return True
+
+        return False
 
     def parent_complete(self, status, result):
         if status == JobStatus.OK:
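
Note: the clean() changes above leave a comment that the per-ref progress() callback may eventually need throttling. One possible approach, sketched here only as a hypothetical variant of the callback defined in child_process() (not part of this branch), is to rate-limit the updates by time:

    import time

    # Hypothetical: forward a cache-size update at most once per second
    def make_throttled_progress(job, artifacts, interval=1.0):
        last_sent = 0.0

        def progress():
            nonlocal last_sent
            now = time.monotonic()
            if now - last_sent >= interval:
                job.send_message('update-cache-size', artifacts.get_cache_size())
                last_sent = now

        return progress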
... | ... | @@ -58,10 +58,10 @@ class JobStatus(): |
58 | 58 |
|
59 | 59 |
|
60 | 60 |
# Used to distinguish between status messages and return values
|
61 |
-class Envelope():
|
|
61 |
+class _Envelope():
|
|
62 | 62 |
def __init__(self, message_type, message):
|
63 |
- self._message_type = message_type
|
|
64 |
- self._message = message
|
|
63 |
+ self.message_type = message_type
|
|
64 |
+ self.message = message
|
|
65 | 65 |
|
66 | 66 |
|
67 | 67 |
# Process class that doesn't call waitpid on its own.
|
@@ -275,10 +275,37 @@ class Job():
     def set_task_id(self, task_id):
         self._task_id = task_id
 
+    # send_message()
+    #
+    # To be called from inside Job.child_process() implementations
+    # to send messages to the main process during processing.
+    #
+    # These messages will be processed by the class's Job.handle_message()
+    # implementation.
+    #
+    def send_message(self, message_type, message):
+        self._queue.put(_Envelope(message_type, message))
+
     #######################################################
     #                  Abstract Methods                   #
     #######################################################
 
+    # handle_message()
+    #
+    # Handle a custom message. This will be called in the main process in
+    # response to any messages sent to the main process using the
+    # Job.send_message() API from inside a Job.child_process() implementation.
+    #
+    # Args:
+    #    message_type (str): A string to identify the message type
+    #    message (any): A simple serializable object
+    #
+    # Returns:
+    #    (bool): Should return a truthy value if message_type is handled.
+    #
+    def handle_message(self, message_type, message):
+        return False
+
     # parent_complete()
     #
     # This will be executed after the job finishes, and is expected to
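
Note: CleanupJob above is the first user of this message channel. As a second, purely illustrative sketch (this subclass does not exist in BuildStream), any Job.child_process() implementation can report intermediate state the same way:

    # Hypothetical subclass showing the send_message() / handle_message() contract
    class ExampleCountingJob(Job):

        def child_process(self):
            for i in range(3):
                # Runs in the child process; messages are queued back to the parent
                self.send_message('progress', i + 1)
            return 'done'

        def handle_message(self, message_type, message):
            # Runs in the main process; return True only for message types we own
            if message_type == 'progress':
                print("processed {} items so far".format(message))
                return True
            return False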
... | ... | @@ -416,7 +443,7 @@ class Job(): |
416 | 443 |
elapsed=elapsed, detail=e.detail,
|
417 | 444 |
logfile=filename, sandbox=e.sandbox)
|
418 | 445 |
|
419 |
- self._queue.put(Envelope('child_data', self.child_process_data()))
|
|
446 |
+ self._queue.put(_Envelope('child_data', self.child_process_data()))
|
|
420 | 447 |
|
421 | 448 |
# Report the exception to the parent (for internal testing purposes)
|
422 | 449 |
self._child_send_error(e)
|
... | ... | @@ -442,7 +469,7 @@ class Job(): |
442 | 469 |
|
443 | 470 |
else:
|
444 | 471 |
# No exception occurred in the action
|
445 |
- self._queue.put(Envelope('child_data', self.child_process_data()))
|
|
472 |
+ self._queue.put(_Envelope('child_data', self.child_process_data()))
|
|
446 | 473 |
self._child_send_result(result)
|
447 | 474 |
|
448 | 475 |
elapsed = datetime.datetime.now() - starttime
|
... | ... | @@ -469,7 +496,7 @@ class Job(): |
469 | 496 |
domain = e.domain
|
470 | 497 |
reason = e.reason
|
471 | 498 |
|
472 |
- envelope = Envelope('error', {
|
|
499 |
+ envelope = _Envelope('error', {
|
|
473 | 500 |
'domain': domain,
|
474 | 501 |
'reason': reason
|
475 | 502 |
})
|
... | ... | @@ -487,7 +514,7 @@ class Job(): |
487 | 514 |
#
|
488 | 515 |
def _child_send_result(self, result):
|
489 | 516 |
if result is not None:
|
490 |
- envelope = Envelope('result', result)
|
|
517 |
+ envelope = _Envelope('result', result)
|
|
491 | 518 |
self._queue.put(envelope)
|
492 | 519 |
|
493 | 520 |
# _child_shutdown()
|
... | ... | @@ -524,7 +551,7 @@ class Job(): |
524 | 551 |
if message.message_type == MessageType.LOG:
|
525 | 552 |
return
|
526 | 553 |
|
527 |
- self._queue.put(Envelope('message', message))
|
|
554 |
+ self._queue.put(_Envelope('message', message))
|
|
528 | 555 |
|
529 | 556 |
# _parent_shutdown()
|
530 | 557 |
#
|
... | ... | @@ -588,24 +615,28 @@ class Job(): |
588 | 615 |
if not self._listening:
|
589 | 616 |
return
|
590 | 617 |
|
591 |
- if envelope._message_type == 'message':
|
|
618 |
+ if envelope.message_type == 'message':
|
|
592 | 619 |
# Propagate received messages from children
|
593 | 620 |
# back through the context.
|
594 |
- self._scheduler.context.message(envelope._message)
|
|
595 |
- elif envelope._message_type == 'error':
|
|
621 |
+ self._scheduler.context.message(envelope.message)
|
|
622 |
+ elif envelope.message_type == 'error':
|
|
596 | 623 |
# For regression tests only, save the last error domain / reason
|
597 | 624 |
# reported from a child task in the main process, this global state
|
598 | 625 |
# is currently managed in _exceptions.py
|
599 |
- set_last_task_error(envelope._message['domain'],
|
|
600 |
- envelope._message['reason'])
|
|
601 |
- elif envelope._message_type == 'result':
|
|
626 |
+ set_last_task_error(envelope.message['domain'],
|
|
627 |
+ envelope.message['reason'])
|
|
628 |
+ elif envelope.message_type == 'result':
|
|
602 | 629 |
assert self._result is None
|
603 |
- self._result = envelope._message
|
|
604 |
- elif envelope._message_type == 'child_data':
|
|
630 |
+ self._result = envelope.message
|
|
631 |
+ elif envelope.message_type == 'child_data':
|
|
605 | 632 |
# If we retry a job, we assign a new value to this
|
606 |
- self.child_data = envelope._message
|
|
607 |
- else:
|
|
608 |
- raise Exception()
|
|
633 |
+ self.child_data = envelope.message
|
|
634 |
+ |
|
635 |
+ # Try Job subclass specific messages now
|
|
636 |
+ elif not self.handle_message(envelope.message_type,
|
|
637 |
+ envelope.message):
|
|
638 |
+ assert 0, "Unhandled message type '{}': {}" \
|
|
639 |
+ .format(envelope.message_type, envelope.message)
|
|
609 | 640 |
|
610 | 641 |
# _parent_process_queue()
|
611 | 642 |
#
|
@@ -163,4 +163,4 @@ class Resources():
     def unregister_exclusive_interest(self, resources, source):
 
         for resource in resources:
-            self._exclusive_resources[resource].remove(source)
+            self._exclusive_resources[resource].discard(source)
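
Note: set.discard() only differs from set.remove() in that it is a no-op when the element is absent, so the scheduler (below) can unregister an exclusive interest unconditionally without tracking whether one was ever registered:

    interested = {'cache-size'}
    interested.discard('cleanup')   # absent: silently ignored
    interested.remove('cleanup')    # absent: raises KeyError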
... | ... | @@ -40,8 +40,8 @@ class SchedStatus(): |
40 | 40 |
|
41 | 41 |
# Some action names for the internal jobs we launch
|
42 | 42 |
#
|
43 |
-_ACTION_NAME_CLEANUP = 'cleanup'
|
|
44 |
-_ACTION_NAME_CACHE_SIZE = 'cache_size'
|
|
43 |
+_ACTION_NAME_CLEANUP = 'clean'
|
|
44 |
+_ACTION_NAME_CACHE_SIZE = 'size'
|
|
45 | 45 |
|
46 | 46 |
|
47 | 47 |
# Scheduler()
|
... | ... | @@ -151,6 +151,9 @@ class Scheduler(): |
151 | 151 |
# Handle unix signals while running
|
152 | 152 |
self._connect_signals()
|
153 | 153 |
|
154 |
+ # Check if we need to start with some cache maintenance
|
|
155 |
+ self._check_cache_management()
|
|
156 |
+ |
|
154 | 157 |
# Run the queues
|
155 | 158 |
self._sched()
|
156 | 159 |
self.loop.run_forever()
|
... | ... | @@ -272,6 +275,31 @@ class Scheduler(): |
272 | 275 |
# Local Private Methods #
|
273 | 276 |
#######################################################
|
274 | 277 |
|
278 |
+ # _check_cache_management()
|
|
279 |
+ #
|
|
280 |
+ # Run an initial check if we need to lock the cache
|
|
281 |
+ # resource and check the size and possibly launch
|
|
282 |
+ # a cleanup.
|
|
283 |
+ #
|
|
284 |
+ # Sessions which do not add to the cache are not affected.
|
|
285 |
+ #
|
|
286 |
+ def _check_cache_management(self):
|
|
287 |
+ |
|
288 |
+ # Only trigger the check for a scheduler run which has
|
|
289 |
+ # queues which require the CACHE resource.
|
|
290 |
+ if not any(q for q in self.queues
|
|
291 |
+ if ResourceType.CACHE in q.resources):
|
|
292 |
+ return
|
|
293 |
+ |
|
294 |
+ # If the estimated size outgrows the quota, queue a job to
|
|
295 |
+ # actually check the real cache size initially, this one
|
|
296 |
+ # should have exclusive access to the cache to ensure nothing
|
|
297 |
+ # starts while we are checking the cache.
|
|
298 |
+ #
|
|
299 |
+ artifacts = self.context.artifactcache
|
|
300 |
+ if artifacts.has_quota_exceeded():
|
|
301 |
+ self._sched_cache_size_job(exclusive=True)
|
|
302 |
+ |
|
275 | 303 |
# _spawn_job()
|
276 | 304 |
#
|
277 | 305 |
# Spawns a job
|
... | ... | @@ -292,6 +320,11 @@ class Scheduler(): |
292 | 320 |
self._cache_size_running = None
|
293 | 321 |
self.resources.release([ResourceType.CACHE, ResourceType.PROCESS])
|
294 | 322 |
|
323 |
+ # Unregister the exclusive interest if there was any
|
|
324 |
+ self.resources.unregister_exclusive_interest(
|
|
325 |
+ [ResourceType.CACHE], 'cache-size'
|
|
326 |
+ )
|
|
327 |
+ |
|
295 | 328 |
# Schedule a cleanup job if we've hit the threshold
|
296 | 329 |
if status != JobStatus.OK:
|
297 | 330 |
return
|
... | ... | @@ -344,11 +377,35 @@ class Scheduler(): |
344 | 377 |
# Runs a cache size job if one is scheduled to run now and
|
345 | 378 |
# sufficient resources are available.
|
346 | 379 |
#
|
347 |
- def _sched_cache_size_job(self):
|
|
380 |
+ # Args:
|
|
381 |
+ # exclusive (bool): Run a cache size job immediately and
|
|
382 |
+ # hold the ResourceType.CACHE resource
|
|
383 |
+ # exclusively (used at startup).
|
|
384 |
+ #
|
|
385 |
+ def _sched_cache_size_job(self, *, exclusive=False):
|
|
386 |
+ |
|
387 |
+ # The exclusive argument is not intended (or safe) for arbitrary use.
|
|
388 |
+ if exclusive:
|
|
389 |
+ assert not self._cache_size_scheduled
|
|
390 |
+ assert not self._cache_size_running
|
|
391 |
+ assert not self._active_jobs
|
|
392 |
+ self._cache_size_scheduled = True
|
|
348 | 393 |
|
349 | 394 |
if self._cache_size_scheduled and not self._cache_size_running:
|
350 | 395 |
|
351 |
- if self.resources.reserve([ResourceType.CACHE, ResourceType.PROCESS]):
|
|
396 |
+ # Handle the exclusive launch
|
|
397 |
+ exclusive_resources = set()
|
|
398 |
+ if exclusive:
|
|
399 |
+ exclusive_resources.add(ResourceType.CACHE)
|
|
400 |
+ self.resources.register_exclusive_interest(
|
|
401 |
+ exclusive_resources, 'cache-size'
|
|
402 |
+ )
|
|
403 |
+ |
|
404 |
+ # Reserve the resources (with the possible exclusive cache resource)
|
|
405 |
+ if self.resources.reserve([ResourceType.CACHE, ResourceType.PROCESS],
|
|
406 |
+ exclusive_resources):
|
|
407 |
+ |
|
408 |
+ # Update state and launch
|
|
352 | 409 |
self._cache_size_scheduled = False
|
353 | 410 |
self._cache_size_running = \
|
354 | 411 |
CacheSizeJob(self, _ACTION_NAME_CACHE_SIZE,
|
@@ -633,6 +633,27 @@ def _get_dir_size(path):
     return get_size(path)
 
 
+# _get_volume_size():
+#
+# Gets the total size and available space of a mounted filesystem, in bytes.
+#
+# Args:
+#    path (str): The path to check
+#
+# Returns:
+#    (int): The total number of bytes on the volume
+#    (int): The number of available bytes on the volume
+#
+def _get_volume_size(path):
+    try:
+        stat_ = os.statvfs(path)
+    except OSError as e:
+        raise UtilError("Failed to retrieve stats on volume for path '{}': {}"
+                        .format(path, e)) from e
+
+    return stat_.f_bsize * stat_.f_blocks, stat_.f_bsize * stat_.f_bavail
+
+
 # _parse_size():
 #
 # Convert a string representing data size to a number of
@@ -667,8 +688,7 @@ def _parse_size(size, volume):
     if num > 100:
         raise UtilError("{}% is not a valid percentage value.".format(num))
 
-    stat_ = os.statvfs(volume)
-    disk_size = stat_.f_blocks * stat_.f_bsize
+    disk_size, _ = _get_volume_size(volume)
 
     return disk_size * (num / 100)
 
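
Note: with this change a percentage quota resolves against the same statvfs query as the rest of the cache management code. A small illustrative sketch of the arithmetic (the path and percentage are arbitrary):

    import os

    # Illustration only: how a '50%' quota maps to bytes on the volume holding 'path'
    def resolve_percent_quota(percent, path):
        stat_ = os.statvfs(path)
        disk_size = stat_.f_bsize * stat_.f_blocks    # first value returned by _get_volume_size()
        return disk_size * (percent / 100)

    print(resolve_percent_quota(50, os.path.expanduser("~")))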
1 | 1 |
import os
|
2 | 2 |
import pytest
|
3 |
+from unittest import mock
|
|
3 | 4 |
|
4 | 5 |
from buildstream import _yaml
|
5 | 6 |
from buildstream._artifactcache import CACHE_SIZE_FILE
|
7 |
+from buildstream._exceptions import ErrorDomain
|
|
6 | 8 |
|
7 | 9 |
from tests.testutils import cli, create_element_size
|
8 | 10 |
|
... | ... | @@ -60,3 +62,29 @@ def test_cache_size_write(cli, tmpdir): |
60 | 62 |
with open(sizefile, "r") as f:
|
61 | 63 |
size_data = f.read()
|
62 | 64 |
size = int(size_data)
|
65 |
+ |
|
66 |
+ |
|
67 |
+def test_quota_over_1024T(cli, tmpdir):
|
|
68 |
+ KiB = 1024
|
|
69 |
+ MiB = (KiB * 1024)
|
|
70 |
+ GiB = (MiB * 1024)
|
|
71 |
+ TiB = (GiB * 1024)
|
|
72 |
+ |
|
73 |
+ cli.configure({
|
|
74 |
+ 'cache': {
|
|
75 |
+ 'quota': 2048 * TiB
|
|
76 |
+ }
|
|
77 |
+ })
|
|
78 |
+ project = tmpdir.join("main")
|
|
79 |
+ os.makedirs(str(project))
|
|
80 |
+ _yaml.dump({'name': 'main'}, str(project.join("project.conf")))
|
|
81 |
+ |
|
82 |
+ volume_space_patch = mock.patch(
|
|
83 |
+ "buildstream._artifactcache.ArtifactCache._get_cache_volume_size",
|
|
84 |
+ autospec=True,
|
|
85 |
+ return_value=(1025 * TiB, 1025 * TiB)
|
|
86 |
+ )
|
|
87 |
+ |
|
88 |
+ with volume_space_patch:
|
|
89 |
+ result = cli.run(project, args=["build", "file.bst"])
|
|
90 |
+ result.assert_main_error(ErrorDomain.ARTIFACT, 'insufficient-storage-for-quota')
|
... | ... | @@ -18,6 +18,7 @@ |
18 | 18 |
#
|
19 | 19 |
|
20 | 20 |
import os
|
21 |
+import re
|
|
21 | 22 |
from unittest import mock
|
22 | 23 |
|
23 | 24 |
import pytest
|
... | ... | @@ -66,8 +67,9 @@ def test_artifact_expires(cli, datafiles, tmpdir): |
66 | 67 |
res.assert_success()
|
67 | 68 |
|
68 | 69 |
# Check that the correct element remains in the cache
|
69 |
- assert cli.get_element_state(project, 'target.bst') != 'cached'
|
|
70 |
- assert cli.get_element_state(project, 'target2.bst') == 'cached'
|
|
70 |
+ states = cli.get_element_states(project, ['target.bst', 'target2.bst'])
|
|
71 |
+ assert states['target.bst'] != 'cached'
|
|
72 |
+ assert states['target2.bst'] == 'cached'
|
|
71 | 73 |
|
72 | 74 |
|
73 | 75 |
# Ensure that we don't end up deleting the whole cache (or worse) if
|
... | ... | @@ -144,9 +146,11 @@ def test_expiry_order(cli, datafiles, tmpdir): |
144 | 146 |
# have been removed.
|
145 | 147 |
# Note that buildstream will reduce the cache to 50% of the
|
146 | 148 |
# original size - we therefore remove multiple elements.
|
147 |
- |
|
148 |
- assert (tuple(cli.get_element_state(project, element) for element in
|
|
149 |
- ('unrelated.bst', 'target.bst', 'target2.bst', 'dep.bst', 'expire.bst')) ==
|
|
149 |
+ check_elements = [
|
|
150 |
+ 'unrelated.bst', 'target.bst', 'target2.bst', 'dep.bst', 'expire.bst'
|
|
151 |
+ ]
|
|
152 |
+ states = cli.get_element_states(project, check_elements)
|
|
153 |
+ assert (tuple(states[element] for element in check_elements) ==
|
|
150 | 154 |
('buildable', 'buildable', 'buildable', 'cached', 'cached', ))
|
151 | 155 |
|
152 | 156 |
|
... | ... | @@ -176,8 +180,9 @@ def test_keep_dependencies(cli, datafiles, tmpdir): |
176 | 180 |
res.assert_success()
|
177 | 181 |
|
178 | 182 |
# Check that the correct element remains in the cache
|
179 |
- assert cli.get_element_state(project, 'dependency.bst') == 'cached'
|
|
180 |
- assert cli.get_element_state(project, 'unrelated.bst') == 'cached'
|
|
183 |
+ states = cli.get_element_states(project, ['dependency.bst', 'unrelated.bst'])
|
|
184 |
+ assert states['dependency.bst'] == 'cached'
|
|
185 |
+ assert states['unrelated.bst'] == 'cached'
|
|
181 | 186 |
|
182 | 187 |
# We try to build an element which depends on the LRU artifact,
|
183 | 188 |
# and could therefore fail if we didn't make sure dependencies
|
... | ... | @@ -192,9 +197,10 @@ def test_keep_dependencies(cli, datafiles, tmpdir): |
192 | 197 |
res = cli.run(project=project, args=['build', 'target.bst'])
|
193 | 198 |
res.assert_success()
|
194 | 199 |
|
195 |
- assert cli.get_element_state(project, 'unrelated.bst') != 'cached'
|
|
196 |
- assert cli.get_element_state(project, 'dependency.bst') == 'cached'
|
|
197 |
- assert cli.get_element_state(project, 'target.bst') == 'cached'
|
|
200 |
+ states = cli.get_element_states(project, ['target.bst', 'unrelated.bst'])
|
|
201 |
+ assert states['target.bst'] == 'cached'
|
|
202 |
+ assert states['dependency.bst'] == 'cached'
|
|
203 |
+ assert states['unrelated.bst'] != 'cached'
|
|
198 | 204 |
|
199 | 205 |
|
200 | 206 |
# Assert that we never delete a dependency required for a build tree
|
... | ... | @@ -239,11 +245,11 @@ def test_never_delete_required(cli, datafiles, tmpdir): |
239 | 245 |
# life there may potentially be N-builders cached artifacts
|
240 | 246 |
# which exceed the quota
|
241 | 247 |
#
|
242 |
- assert cli.get_element_state(project, 'dep1.bst') == 'cached'
|
|
243 |
- assert cli.get_element_state(project, 'dep2.bst') == 'cached'
|
|
244 |
- |
|
245 |
- assert cli.get_element_state(project, 'dep3.bst') != 'cached'
|
|
246 |
- assert cli.get_element_state(project, 'target.bst') != 'cached'
|
|
248 |
+ states = cli.get_element_states(project, ['target.bst'])
|
|
249 |
+ assert states['dep1.bst'] == 'cached'
|
|
250 |
+ assert states['dep2.bst'] == 'cached'
|
|
251 |
+ assert states['dep3.bst'] != 'cached'
|
|
252 |
+ assert states['target.bst'] != 'cached'
|
|
247 | 253 |
|
248 | 254 |
|
249 | 255 |
# Assert that we never delete a dependency required for a build tree,
|
... | ... | @@ -275,10 +281,11 @@ def test_never_delete_required_track(cli, datafiles, tmpdir): |
275 | 281 |
res.assert_success()
|
276 | 282 |
|
277 | 283 |
# They should all be cached
|
278 |
- assert cli.get_element_state(project, 'dep1.bst') == 'cached'
|
|
279 |
- assert cli.get_element_state(project, 'dep2.bst') == 'cached'
|
|
280 |
- assert cli.get_element_state(project, 'dep3.bst') == 'cached'
|
|
281 |
- assert cli.get_element_state(project, 'target.bst') == 'cached'
|
|
284 |
+ states = cli.get_element_states(project, ['target.bst'])
|
|
285 |
+ assert states['dep1.bst'] == 'cached'
|
|
286 |
+ assert states['dep2.bst'] == 'cached'
|
|
287 |
+ assert states['dep3.bst'] == 'cached'
|
|
288 |
+ assert states['target.bst'] == 'cached'
|
|
282 | 289 |
|
283 | 290 |
# Now increase the size of all the elements
|
284 | 291 |
#
|
... | ... | @@ -296,28 +303,37 @@ def test_never_delete_required_track(cli, datafiles, tmpdir): |
296 | 303 |
|
297 | 304 |
# Expect the same result that we did in test_never_delete_required()
|
298 | 305 |
#
|
299 |
- assert cli.get_element_state(project, 'dep1.bst') == 'cached'
|
|
300 |
- assert cli.get_element_state(project, 'dep2.bst') == 'cached'
|
|
301 |
- assert cli.get_element_state(project, 'dep3.bst') != 'cached'
|
|
302 |
- assert cli.get_element_state(project, 'target.bst') != 'cached'
|
|
306 |
+ states = cli.get_element_states(project, ['target.bst'])
|
|
307 |
+ assert states['dep1.bst'] == 'cached'
|
|
308 |
+ assert states['dep2.bst'] == 'cached'
|
|
309 |
+ assert states['dep3.bst'] != 'cached'
|
|
310 |
+ assert states['target.bst'] != 'cached'
|
|
303 | 311 |
|
304 | 312 |
|
305 | 313 |
# Ensure that only valid cache quotas make it through the loading
|
306 | 314 |
# process.
|
307 |
-@pytest.mark.parametrize("quota,success", [
|
|
308 |
- ("1", True),
|
|
309 |
- ("1K", True),
|
|
310 |
- ("50%", True),
|
|
311 |
- ("infinity", True),
|
|
312 |
- ("0", True),
|
|
313 |
- ("-1", False),
|
|
314 |
- ("pony", False),
|
|
315 |
- ("7K", False),
|
|
316 |
- ("70%", False),
|
|
317 |
- ("200%", False)
|
|
315 |
+#
|
|
316 |
+# This test virtualizes the condition to assume a storage volume
|
|
317 |
+# has 10K total disk space, and 6K of it is already in use (not
|
|
318 |
+# including any space used by the artifact cache).
|
|
319 |
+#
|
|
320 |
+@pytest.mark.parametrize("quota,err_domain,err_reason", [
|
|
321 |
+ # Valid configurations
|
|
322 |
+ ("1", 'success', None),
|
|
323 |
+ ("1K", 'success', None),
|
|
324 |
+ ("50%", 'success', None),
|
|
325 |
+ ("infinity", 'success', None),
|
|
326 |
+ ("0", 'success', None),
|
|
327 |
+ # Invalid configurations
|
|
328 |
+ ("-1", ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA),
|
|
329 |
+ ("pony", ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA),
|
|
330 |
+ ("200%", ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA),
|
|
331 |
+ # Not enough space for these caches
|
|
332 |
+ ("7K", ErrorDomain.ARTIFACT, 'insufficient-storage-for-quota'),
|
|
333 |
+ ("70%", ErrorDomain.ARTIFACT, 'insufficient-storage-for-quota')
|
|
318 | 334 |
])
|
319 | 335 |
@pytest.mark.datafiles(DATA_DIR)
|
320 |
-def test_invalid_cache_quota(cli, datafiles, tmpdir, quota, success):
|
|
336 |
+def test_invalid_cache_quota(cli, datafiles, tmpdir, quota, err_domain, err_reason):
|
|
321 | 337 |
project = os.path.join(datafiles.dirname, datafiles.basename)
|
322 | 338 |
os.makedirs(os.path.join(project, 'elements'))
|
323 | 339 |
|
... | ... | @@ -342,9 +358,9 @@ def test_invalid_cache_quota(cli, datafiles, tmpdir, quota, success): |
342 | 358 |
total_space = 10000
|
343 | 359 |
|
344 | 360 |
volume_space_patch = mock.patch(
|
345 |
- "buildstream._artifactcache.ArtifactCache._get_volume_space_info_for",
|
|
361 |
+ "buildstream._artifactcache.ArtifactCache._get_cache_volume_size",
|
|
346 | 362 |
autospec=True,
|
347 |
- return_value=(free_space, total_space),
|
|
363 |
+ return_value=(total_space, free_space),
|
|
348 | 364 |
)
|
349 | 365 |
|
350 | 366 |
cache_size_patch = mock.patch(
|
... | ... | @@ -356,10 +372,10 @@ def test_invalid_cache_quota(cli, datafiles, tmpdir, quota, success): |
356 | 372 |
with volume_space_patch, cache_size_patch:
|
357 | 373 |
res = cli.run(project=project, args=['workspace', 'list'])
|
358 | 374 |
|
359 |
- if success:
|
|
375 |
+ if err_domain == 'success':
|
|
360 | 376 |
res.assert_success()
|
361 | 377 |
else:
|
362 |
- res.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
|
|
378 |
+ res.assert_main_error(err_domain, err_reason)
|
|
363 | 379 |
|
364 | 380 |
|
365 | 381 |
@pytest.mark.datafiles(DATA_DIR)
|
... | ... | @@ -410,3 +426,66 @@ def test_extract_expiry(cli, datafiles, tmpdir): |
410 | 426 |
|
411 | 427 |
assert os.path.isdir(refsdirtarget2)
|
412 | 428 |
assert not os.path.exists(refsdirtarget)
|
429 |
+ |
|
430 |
+ |
|
431 |
+# Ensures that when launching BuildStream with a full artifact cache,
|
|
432 |
+# the cache size and cleanup jobs are run before any other jobs.
|
|
433 |
+#
|
|
434 |
+@pytest.mark.datafiles(DATA_DIR)
|
|
435 |
+def test_cleanup_first(cli, datafiles, tmpdir):
|
|
436 |
+ project = os.path.join(datafiles.dirname, datafiles.basename)
|
|
437 |
+ element_path = 'elements'
|
|
438 |
+ cache_location = os.path.join(project, 'cache', 'artifacts', 'ostree')
|
|
439 |
+ checkout = os.path.join(project, 'checkout')
|
|
440 |
+ |
|
441 |
+ cli.configure({
|
|
442 |
+ 'cache': {
|
|
443 |
+ 'quota': 10000000,
|
|
444 |
+ }
|
|
445 |
+ })
|
|
446 |
+ |
|
447 |
+ # Create an element that uses almost the entire cache (an empty
|
|
448 |
+ # ostree cache starts at about ~10KiB, so we need a bit of a
|
|
449 |
+ # buffer)
|
|
450 |
+ create_element_size('target.bst', project, element_path, [], 8000000)
|
|
451 |
+ res = cli.run(project=project, args=['build', 'target.bst'])
|
|
452 |
+ res.assert_success()
|
|
453 |
+ |
|
454 |
+ assert cli.get_element_state(project, 'target.bst') == 'cached'
|
|
455 |
+ |
|
456 |
+ # Now configure with a smaller quota, create a situation
|
|
457 |
+ # where the cache must be cleaned up before building anything else.
|
|
458 |
+ #
|
|
459 |
+ # Fix the fetchers and builders just to ensure a predictable
|
|
460 |
+ # sequence of events (although it does not affect this test)
|
|
461 |
+ cli.configure({
|
|
462 |
+ 'cache': {
|
|
463 |
+ 'quota': 5000000,
|
|
464 |
+ },
|
|
465 |
+ 'scheduler': {
|
|
466 |
+ 'fetchers': 1,
|
|
467 |
+ 'builders': 1
|
|
468 |
+ }
|
|
469 |
+ })
|
|
470 |
+ |
|
471 |
+ # Our cache is now more than full, BuildStream
|
|
472 |
+ create_element_size('target2.bst', project, element_path, [], 4000000)
|
|
473 |
+ res = cli.run(project=project, args=['build', 'target2.bst'])
|
|
474 |
+ res.assert_success()
|
|
475 |
+ |
|
476 |
+ # Find all of the activity (like push, pull, fetch) lines
|
|
477 |
+ results = re.findall(r'\[.*\]\[.*\]\[\s*(\S+):.*\]\s*START\s*.*\.log', res.stderr)
|
|
478 |
+ |
|
479 |
+ # Don't bother checking the order of 'fetch', it is allowed to start
|
|
480 |
+ # before or after the initial cache size job, runs in parallel, and does
|
|
481 |
+ # not require ResourceType.CACHE.
|
|
482 |
+ results.remove('fetch')
|
|
483 |
+ print(results)
|
|
484 |
+ |
|
485 |
+ # Assert the expected sequence of events
|
|
486 |
+ assert results == ['size', 'clean', 'build']
|
|
487 |
+ |
|
488 |
+ # Check that the correct element remains in the cache
|
|
489 |
+ states = cli.get_element_states(project, ['target.bst', 'target2.bst'])
|
|
490 |
+ assert states['target.bst'] != 'cached'
|
|
491 |
+ assert states['target2.bst'] == 'cached'
|
... | ... | @@ -41,7 +41,7 @@ def test_default_logging(cli, tmpdir, datafiles): |
41 | 41 |
result = cli.run(project=project, args=['source', 'fetch', element_name])
|
42 | 42 |
result.assert_success()
|
43 | 43 |
|
44 |
- m = re.search(r"\[\d\d:\d\d:\d\d\]\[\]\[\] SUCCESS Checking sources", result.stderr)
|
|
44 |
+ m = re.search(r"\[\d\d:\d\d:\d\d\]\[\s*\]\[.*\] SUCCESS Checking sources", result.stderr)
|
|
45 | 45 |
assert(m is not None)
|
46 | 46 |
|
47 | 47 |
|
... | ... | @@ -77,7 +77,7 @@ def test_custom_logging(cli, tmpdir, datafiles): |
77 | 77 |
result = cli.run(project=project, args=['source', 'fetch', element_name])
|
78 | 78 |
result.assert_success()
|
79 | 79 |
|
80 |
- m = re.search(r"\d\d:\d\d:\d\d,\d\d:\d\d:\d\d.\d{6},\d\d:\d\d:\d\d,,,SUCCESS,Checking sources", result.stderr)
|
|
80 |
+ m = re.search(r"\d\d:\d\d:\d\d,\d\d:\d\d:\d\d.\d{6},\d\d:\d\d:\d\d,\s*,.*,SUCCESS,Checking sources", result.stderr)
|
|
81 | 81 |
assert(m is not None)
|
82 | 82 |
|
83 | 83 |
|
... | ... | @@ -123,7 +123,7 @@ def test_track_recurse(cli, tmpdir, datafiles, kind, amount): |
123 | 123 |
last_element_name = element_name
|
124 | 124 |
|
125 | 125 |
# Assert that a fetch is needed
|
126 |
- states = cli.get_element_states(project, last_element_name)
|
|
126 |
+ states = cli.get_element_states(project, [last_element_name])
|
|
127 | 127 |
for element_name in element_names:
|
128 | 128 |
assert states[element_name] == 'no reference'
|
129 | 129 |
|
... | ... | @@ -143,7 +143,7 @@ def test_track_recurse(cli, tmpdir, datafiles, kind, amount): |
143 | 143 |
result.assert_success()
|
144 | 144 |
|
145 | 145 |
# Assert that the base is buildable and the rest are waiting
|
146 |
- states = cli.get_element_states(project, last_element_name)
|
|
146 |
+ states = cli.get_element_states(project, [last_element_name])
|
|
147 | 147 |
for element_name in element_names:
|
148 | 148 |
if element_name == element_names[0]:
|
149 | 149 |
assert states[element_name] == 'buildable'
|
... | ... | @@ -2,7 +2,7 @@ import os |
2 | 2 |
import shutil
|
3 | 3 |
import pytest
|
4 | 4 |
|
5 |
-from tests.testutils import cli_integration as cli, create_artifact_share
|
|
5 |
+from tests.testutils import cli, cli_integration as cli2, create_artifact_share
|
|
6 | 6 |
from tests.testutils.integration import assert_contains
|
7 | 7 |
from tests.testutils.site import HAVE_BWRAP, IS_LINUX
|
8 | 8 |
from buildstream._exceptions import ErrorDomain, LoadErrorReason
|
... | ... | @@ -32,7 +32,7 @@ def default_state(cli, tmpdir, share): |
32 | 32 |
@pytest.mark.integration
|
33 | 33 |
@pytest.mark.datafiles(DATA_DIR)
|
34 | 34 |
@pytest.mark.skipif(IS_LINUX and not HAVE_BWRAP, reason='Only available with bubblewrap on Linux')
|
35 |
-def test_pullbuildtrees(cli, tmpdir, datafiles):
|
|
35 |
+def test_pullbuildtrees(cli2, tmpdir, datafiles):
|
|
36 | 36 |
project = os.path.join(datafiles.dirname, datafiles.basename)
|
37 | 37 |
element_name = 'autotools/amhello.bst'
|
38 | 38 |
|
... | ... | @@ -40,115 +40,115 @@ def test_pullbuildtrees(cli, tmpdir, datafiles): |
40 | 40 |
with create_artifact_share(os.path.join(str(tmpdir), 'share1')) as share1,\
|
41 | 41 |
create_artifact_share(os.path.join(str(tmpdir), 'share2')) as share2,\
|
42 | 42 |
create_artifact_share(os.path.join(str(tmpdir), 'share3')) as share3:
|
43 |
- cli.configure({
|
|
43 |
+ cli2.configure({
|
|
44 | 44 |
'artifacts': {'url': share1.repo, 'push': True},
|
45 | 45 |
'artifactdir': os.path.join(str(tmpdir), 'artifacts')
|
46 | 46 |
})
|
47 | 47 |
|
48 | 48 |
# Build autotools element, checked pushed, delete local
|
49 |
- result = cli.run(project=project, args=['build', element_name])
|
|
49 |
+ result = cli2.run(project=project, args=['build', element_name])
|
|
50 | 50 |
assert result.exit_code == 0
|
51 |
- assert cli.get_element_state(project, element_name) == 'cached'
|
|
52 |
- assert share1.has_artifact('test', element_name, cli.get_element_key(project, element_name))
|
|
53 |
- default_state(cli, tmpdir, share1)
|
|
51 |
+ assert cli2.get_element_state(project, element_name) == 'cached'
|
|
52 |
+ assert share1.has_artifact('test', element_name, cli2.get_element_key(project, element_name))
|
|
53 |
+ default_state(cli2, tmpdir, share1)
|
|
54 | 54 |
|
55 | 55 |
# Pull artifact with default config, assert that pulling again
|
56 | 56 |
# doesn't create a pull job, then assert with buildtrees user
|
57 | 57 |
# config set creates a pull job.
|
58 |
- result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
58 |
+ result = cli2.run(project=project, args=['artifact', 'pull', element_name])
|
|
59 | 59 |
assert element_name in result.get_pulled_elements()
|
60 |
- result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
60 |
+ result = cli2.run(project=project, args=['artifact', 'pull', element_name])
|
|
61 | 61 |
assert element_name not in result.get_pulled_elements()
|
62 |
- cli.configure({'cache': {'pull-buildtrees': True}})
|
|
63 |
- result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
62 |
+ cli2.configure({'cache': {'pull-buildtrees': True}})
|
|
63 |
+ result = cli2.run(project=project, args=['artifact', 'pull', element_name])
|
|
64 | 64 |
assert element_name in result.get_pulled_elements()
|
65 |
- default_state(cli, tmpdir, share1)
|
|
65 |
+ default_state(cli2, tmpdir, share1)
|
|
66 | 66 |
|
67 | 67 |
# Pull artifact with default config, then assert that pulling
|
68 | 68 |
# with buildtrees cli flag set creates a pull job.
|
69 | 69 |
# Also assert that the buildtree is added to the artifact's
|
70 | 70 |
# extract dir
|
71 |
- result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
71 |
+ result = cli2.run(project=project, args=['artifact', 'pull', element_name])
|
|
72 | 72 |
assert element_name in result.get_pulled_elements()
|
73 |
- elementdigest = share1.has_artifact('test', element_name, cli.get_element_key(project, element_name))
|
|
73 |
+ elementdigest = share1.has_artifact('test', element_name, cli2.get_element_key(project, element_name))
|
|
74 | 74 |
buildtreedir = os.path.join(str(tmpdir), 'artifacts', 'extract', 'test', 'autotools-amhello',
|
75 | 75 |
elementdigest.hash, 'buildtree')
|
76 | 76 |
assert not os.path.isdir(buildtreedir)
|
77 |
- result = cli.run(project=project, args=['--pull-buildtrees', 'artifact', 'pull', element_name])
|
|
77 |
+ result = cli2.run(project=project, args=['--pull-buildtrees', 'artifact', 'pull', element_name])
|
|
78 | 78 |
assert element_name in result.get_pulled_elements()
|
79 | 79 |
assert os.path.isdir(buildtreedir)
|
80 |
- default_state(cli, tmpdir, share1)
|
|
80 |
+ default_state(cli2, tmpdir, share1)
|
|
81 | 81 |
|
82 | 82 |
# Pull artifact with pullbuildtrees set in user config, then assert
|
83 | 83 |
# that pulling with the same user config doesn't creates a pull job,
|
84 | 84 |
# or when buildtrees cli flag is set.
|
85 |
- cli.configure({'cache': {'pull-buildtrees': True}})
|
|
86 |
- result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
85 |
+ cli2.configure({'cache': {'pull-buildtrees': True}})
|
|
86 |
+ result = cli2.run(project=project, args=['artifact', 'pull', element_name])
|
|
87 | 87 |
assert element_name in result.get_pulled_elements()
|
88 |
- result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
88 |
+ result = cli2.run(project=project, args=['artifact', 'pull', element_name])
|
|
89 | 89 |
assert element_name not in result.get_pulled_elements()
|
90 |
- result = cli.run(project=project, args=['--pull-buildtrees', 'artifact', 'pull', element_name])
|
|
90 |
+ result = cli2.run(project=project, args=['--pull-buildtrees', 'artifact', 'pull', element_name])
|
|
91 | 91 |
assert element_name not in result.get_pulled_elements()
|
92 |
- default_state(cli, tmpdir, share1)
|
|
92 |
+ default_state(cli2, tmpdir, share1)
|
|
93 | 93 |
|
94 | 94 |
# Pull artifact with default config and buildtrees cli flag set, then assert
|
95 | 95 |
# that pulling with pullbuildtrees set in user config doesn't create a pull
|
96 | 96 |
# job.
|
97 |
- result = cli.run(project=project, args=['--pull-buildtrees', 'artifact', 'pull', element_name])
|
|
97 |
+ result = cli2.run(project=project, args=['--pull-buildtrees', 'artifact', 'pull', element_name])
|
|
98 | 98 |
assert element_name in result.get_pulled_elements()
|
99 |
- cli.configure({'cache': {'pull-buildtrees': True}})
|
|
100 |
- result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
99 |
+ cli2.configure({'cache': {'pull-buildtrees': True}})
|
|
100 |
+ result = cli2.run(project=project, args=['artifact', 'pull', element_name])
|
|
101 | 101 |
assert element_name not in result.get_pulled_elements()
|
102 |
- default_state(cli, tmpdir, share1)
|
|
102 |
+ default_state(cli2, tmpdir, share1)
|
|
103 | 103 |
|
104 | 104 |
# Assert that a partial build element (not containing a populated buildtree dir)
|
105 | 105 |
# can't be pushed to an artifact share, then assert that a complete build element
|
106 | 106 |
# can be. This will attempt a partial pull from share1 and then a partial push
|
107 | 107 |
# to share2
|
108 |
- result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
108 |
+ result = cli2.run(project=project, args=['artifact', 'pull', element_name])
|
|
109 | 109 |
assert element_name in result.get_pulled_elements()
|
110 |
- cli.configure({'artifacts': {'url': share2.repo, 'push': True}})
|
|
111 |
- result = cli.run(project=project, args=['artifact', 'push', element_name])
|
|
110 |
+ cli2.configure({'artifacts': {'url': share2.repo, 'push': True}})
|
|
111 |
+ result = cli2.run(project=project, args=['artifact', 'push', element_name])
|
|
112 | 112 |
assert element_name not in result.get_pushed_elements()
|
113 |
- assert not share2.has_artifact('test', element_name, cli.get_element_key(project, element_name))
|
|
113 |
+ assert not share2.has_artifact('test', element_name, cli2.get_element_key(project, element_name))
|
|
114 | 114 |
|
115 | 115 |
# Assert that after pulling the missing buildtree the element artifact can be
|
116 | 116 |
# successfully pushed to the remote. This will attempt to pull the buildtree
|
117 | 117 |
# from share1 and then a 'complete' push to share2
|
118 |
- cli.configure({'artifacts': {'url': share1.repo, 'push': False}})
|
|
119 |
- result = cli.run(project=project, args=['--pull-buildtrees', 'artifact', 'pull', element_name])
|
|
118 |
+ cli2.configure({'artifacts': {'url': share1.repo, 'push': False}})
|
|
119 |
+ result = cli2.run(project=project, args=['--pull-buildtrees', 'artifact', 'pull', element_name])
|
|
120 | 120 |
assert element_name in result.get_pulled_elements()
|
121 |
- cli.configure({'artifacts': {'url': share2.repo, 'push': True}})
|
|
122 |
- result = cli.run(project=project, args=['artifact', 'push', element_name])
|
|
121 |
+ cli2.configure({'artifacts': {'url': share2.repo, 'push': True}})
|
|
122 |
+ result = cli2.run(project=project, args=['artifact', 'push', element_name])
|
|
123 | 123 |
assert element_name in result.get_pushed_elements()
|
124 |
- assert share2.has_artifact('test', element_name, cli.get_element_key(project, element_name))
|
|
125 |
- default_state(cli, tmpdir, share1)
|
|
124 |
+ assert share2.has_artifact('test', element_name, cli2.get_element_key(project, element_name))
|
|
125 |
+ default_state(cli2, tmpdir, share1)
|
|
126 | 126 |
|
127 | 127 |
# Assert that bst push will automatically attempt to pull a missing buildtree
|
128 | 128 |
# if pull-buildtrees is set, however as share3 is the only defined remote and is empty,
|
129 | 129 |
# assert that no element artifact buildtrees are pulled (no available remote buildtree) and thus the
|
130 | 130 |
# artifact cannot be pushed.
|
131 |
- result = cli.run(project=project, args=['artifact', 'pull', element_name])
|
|
131 |
+ result = cli2.run(project=project, args=['artifact', 'pull', element_name])
|
|
132 | 132 |
assert element_name in result.get_pulled_elements()
|
133 |
- cli.configure({'artifacts': {'url': share3.repo, 'push': True}})
|
|
134 |
- result = cli.run(project=project, args=['--pull-buildtrees', 'artifact', 'push', element_name])
|
|
133 |
+ cli2.configure({'artifacts': {'url': share3.repo, 'push': True}})
|
|
134 |
+ result = cli2.run(project=project, args=['--pull-buildtrees', 'artifact', 'push', element_name])
|
|
135 | 135 |
assert "Attempting to fetch missing artifact buildtrees" in result.stderr
|
136 | 136 |
assert element_name not in result.get_pulled_elements()
|
137 | 137 |
assert not os.path.isdir(buildtreedir)
|
138 | 138 |
assert element_name not in result.get_pushed_elements()
|
139 |
- assert not share3.has_artifact('test', element_name, cli.get_element_key(project, element_name))
|
|
139 |
+ assert not share3.has_artifact('test', element_name, cli2.get_element_key(project, element_name))
|
|
140 | 140 |
|
141 | 141 |
# Assert that if we add an extra remote that has the buildtree artifact cached, bst push will
|
142 | 142 |
# automatically attempt to pull it and will be successful, leading to the full artifact being pushed
|
143 | 143 |
# to the empty share3. This gives the ability to attempt to push currently partial artifacts to a remote,
|
144 | 144 |
# without explicitly requiring a bst pull.
|
145 |
- cli.configure({'artifacts': [{'url': share1.repo, 'push': False}, {'url': share3.repo, 'push': True}]})
|
|
146 |
- result = cli.run(project=project, args=['--pull-buildtrees', 'artifact', 'push', element_name])
|
|
145 |
+ cli2.configure({'artifacts': [{'url': share1.repo, 'push': False}, {'url': share3.repo, 'push': True}]})
|
|
146 |
+ result = cli2.run(project=project, args=['--pull-buildtrees', 'artifact', 'push', element_name])
|
|
147 | 147 |
assert "Attempting to fetch missing artifact buildtrees" in result.stderr
|
148 | 148 |
assert element_name in result.get_pulled_elements()
|
149 | 149 |
assert os.path.isdir(buildtreedir)
|
150 | 150 |
assert element_name in result.get_pushed_elements()
|
151 |
- assert share3.has_artifact('test', element_name, cli.get_element_key(project, element_name))
|
|
151 |
+ assert share3.has_artifact('test', element_name, cli2.get_element_key(project, element_name))
|
|
152 | 152 |
|
153 | 153 |
|
154 | 154 |
# Ensure that only valid pull-buildtrees boolean options make it through the loading
|
1 |
-import os
|
|
2 |
-from unittest import mock
|
|
3 |
- |
|
4 |
-from buildstream import _yaml
|
|
5 |
- |
|
6 |
-from ..testutils.runcli import cli
|
|
7 |
- |
|
8 |
- |
|
9 |
-KiB = 1024
|
|
10 |
-MiB = (KiB * 1024)
|
|
11 |
-GiB = (MiB * 1024)
|
|
12 |
-TiB = (GiB * 1024)
|
|
13 |
- |
|
14 |
- |
|
15 |
-def test_parse_size_over_1024T(cli, tmpdir):
|
|
16 |
- cli.configure({
|
|
17 |
- 'cache': {
|
|
18 |
- 'quota': 2048 * TiB
|
|
19 |
- }
|
|
20 |
- })
|
|
21 |
- project = tmpdir.join("main")
|
|
22 |
- os.makedirs(str(project))
|
|
23 |
- _yaml.dump({'name': 'main'}, str(project.join("project.conf")))
|
|
24 |
- |
|
25 |
- volume_space_patch = mock.patch(
|
|
26 |
- "buildstream._artifactcache.ArtifactCache._get_volume_space_info_for",
|
|
27 |
- autospec=True,
|
|
28 |
- return_value=(1025 * TiB, 1025 * TiB)
|
|
29 |
- )
|
|
30 |
- |
|
31 |
- with volume_space_patch:
|
|
32 |
- result = cli.run(project, args=["build", "file.bst"])
|
|
33 |
- failure_msg = 'Your system does not have enough available space to support the cache quota specified.'
|
|
34 |
- assert failure_msg in result.stderr
|
... | ... | @@ -398,13 +398,12 @@ class Cli(): |
398 | 398 |
#
|
399 | 399 |
# Returns a dictionary with the element names as keys
|
400 | 400 |
#
|
401 |
- def get_element_states(self, project, target, deps='all'):
|
|
401 |
+ def get_element_states(self, project, targets, deps='all'):
|
|
402 | 402 |
result = self.run(project=project, silent=True, args=[
|
403 | 403 |
'show',
|
404 | 404 |
'--deps', deps,
|
405 | 405 |
'--format', '%{name}||%{state}',
|
406 |
- target
|
|
407 |
- ])
|
|
406 |
+ ] + targets)
|
|
408 | 407 |
result.assert_success()
|
409 | 408 |
lines = result.output.splitlines()
|
410 | 409 |
states = {}
|