Tristan Van Berkom pushed to branch tristan/cache-management at BuildStream / buildstream
Commits:
- 3b60416e by Tristan Van Berkom at 2019-01-22T19:19:52Z
- ff2d368e by Tristan Van Berkom at 2019-01-22T19:19:52Z
- d1c1ef7a by Tristan Van Berkom at 2019-01-22T19:19:52Z
- 44e1931b by Tristan Van Berkom at 2019-01-22T19:19:53Z
- 96798d99 by Tristan Van Berkom at 2019-01-22T19:19:53Z
- ec023ad2 by Tristan Van Berkom at 2019-01-22T19:19:53Z
- 80ccad8a by Tristan Van Berkom at 2019-01-22T19:19:53Z
- a6bc4d7d by Tristan Van Berkom at 2019-01-22T19:19:53Z
- cc233084 by Tristan Van Berkom at 2019-01-22T19:19:53Z
- afb794c2 by Tristan Van Berkom at 2019-01-22T19:19:53Z
- 568b6a0b by Tristan Van Berkom at 2019-01-22T19:19:53Z
- 67056060 by Tristan Van Berkom at 2019-01-22T19:19:53Z
- b2f813fd by Tristan Van Berkom at 2019-01-22T19:19:53Z
- a80bf9f6 by Tristan Van Berkom at 2019-01-22T19:19:53Z
- da5fdba8 by Tristan Van Berkom at 2019-01-22T19:19:53Z
14 changed files:
- buildstream/_artifactcache.py
- buildstream/_context.py
- buildstream/_frontend/app.py
- buildstream/_frontend/status.py
- buildstream/_frontend/widget.py
- buildstream/_scheduler/jobs/cleanupjob.py
- buildstream/_scheduler/jobs/job.py
- buildstream/_scheduler/scheduler.py
- buildstream/utils.py
- tests/artifactcache/cache_size.py
- tests/artifactcache/expiry.py
- tests/frontend/logging.py
- tests/frontend/track.py
- tests/testutils/runcli.py
Changes:
=====================================
buildstream/_artifactcache.py
=====================================

@@ -46,6 +46,39 @@ class ArtifactCacheSpec(CASRemoteSpec):
     pass
 
 
+# ArtifactCacheUsage
+#
+# A simple object to report the current artifact cache
+# usage details.
+#
+# Note that this uses the user configured cache quota
+# rather than the internal quota with protective headroom
+# removed, to provide a more sensible value to display to
+# the user.
+#
+# Args:
+#    artifacts (ArtifactCache): The artifact cache to get the status of
+#
+class ArtifactCacheUsage():
+
+    def __init__(self, artifacts):
+        context = artifacts.context
+        self.quota_config = context.config_cache_quota       # Configured quota
+        self.quota_size = artifacts._cache_quota_original    # Resolved cache quota in bytes
+        self.used_size = artifacts.get_cache_size()          # Size used by artifacts in bytes
+        self.used_percent = 0                                # Percentage of the quota used
+        if self.quota_size is not None:
+            self.used_percent = int(self.used_size * 100 / self.quota_size)
+
+    # Formattable into a human readable string
+    #
+    def __str__(self):
+        return "{} / {} ({}%)" \
+            .format(utils._pretty_size(self.used_size, dec_places=1),
+                    self.quota_config,
+                    self.used_percent)
+
+
 # An ArtifactCache manages artifacts.
 #
 # Args:
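
A note on the arithmetic above: the percentage is computed against the user-facing quota, not the headroom-adjusted internal one. A standalone sketch of the same computation, using made-up numbers in place of a real ArtifactCache:

    # Sketch of the ArtifactCacheUsage computation with hypothetical values.
    quota_config = '5G'          # quota exactly as the user configured it
    quota_size = 5000000000      # resolved quota, in bytes
    used_size = 3450000000       # bytes currently used by artifacts

    used_percent = 0
    if quota_size is not None:
        used_percent = int(used_size * 100 / quota_size)

    # __str__() above renders this as something like "3.2G / 5G (69%)"
    print("{} / {} ({}%)".format(used_size, quota_config, used_percent))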
@@ -64,6 +97,7 @@ class ArtifactCache():
         self._required_elements = set()       # The elements required for this session
         self._cache_size = None               # The current cache size, sometimes it's an estimate
         self._cache_quota = None              # The cache quota
+        self._cache_quota_original = None     # The cache quota as specified by the user, in bytes
         self._cache_lower_threshold = None    # The target cache size for a cleanup
         self._remotes_setup = False           # Check to prevent double-setup of remotes
 
@@ -216,11 +250,33 @@ class ArtifactCache():
     #
     # Clean the artifact cache as much as possible.
     #
+    # Args:
+    #    progress (callable): A callback to call when a ref is removed
+    #
     # Returns:
     #    (int): The size of the cache after having cleaned up
     #
-    def clean(self):
+    def clean(self, progress=None):
         artifacts = self.list_artifacts()
+        context = self.context
+
+        # Some accumulative statistics
+        removed_ref_count = 0
+        space_saved = 0
+
+        # Start off with an announcement with as much info as possible
+        volume_size, volume_avail = self._get_cache_volume_size()
+        self._message(MessageType.STATUS, "Starting cache cleanup",
+                      detail=("Elements required by the current build plan: {}\n" +
+                              "User specified quota: {} ({})\n" +
+                              "Cache usage: {}\n" +
+                              "Cache volume: {} total, {} available")
+                      .format(len(self._required_elements),
+                              context.config_cache_quota,
+                              utils._pretty_size(self._cache_quota_original, dec_places=2),
+                              utils._pretty_size(self.get_cache_size(), dec_places=2),
+                              utils._pretty_size(volume_size, dec_places=2),
+                              utils._pretty_size(volume_avail, dec_places=2)))
 
         # Build a set of the cache keys which are required
         # based on the required elements at cleanup time
@@ -245,11 +301,15 @@ class ArtifactCache():
                 # can't remove them, we have to abort the build.
                 #
                 # FIXME: Asking the user what to do may be neater
+                #
                 default_conf = os.path.join(os.environ['XDG_CONFIG_HOME'],
                                             'buildstream.conf')
-                detail = ("There is not enough space to complete the build.\n"
-                          "Please increase the cache-quota in {}."
-                          .format(self.context.config_origin or default_conf))
+                detail = ("Aborted after removing {} refs and saving {} disk space.\n\n"
+                          "There is not enough space to complete the build.\n"
+                          "Please increase the cache-quota in {} and/or make more disk space."
+                          .format(removed_ref_count,
+                                  utils._pretty_size(space_saved, dec_places=2),
+                                  (context.config_origin or default_conf)))
 
                 if self.has_quota_exceeded():
                     raise ArtifactError("Cache too full. Aborting.",
@@ -264,10 +324,24 @@ class ArtifactCache():
             # Remove the actual artifact, if it's not required.
             size = self.remove(to_remove)
 
+            removed_ref_count += 1
+            space_saved += size
+
             # Remove the size from the removed size
             self.set_cache_size(self._cache_size - size)
 
-        # This should be O(1) if implemented correctly
+            # User callback
+            if progress:
+                progress()
+
+        # Informational message about the side effects of the cleanup
+        self._message(MessageType.INFO, "Cleanup completed",
+                      detail=("Removed {} refs and saving {} disk space.\n" +
+                              "Cache usage is now: {}")
+                      .format(removed_ref_count,
+                              utils._pretty_size(space_saved, dec_places=2),
+                              utils._pretty_size(self.get_cache_size(), dec_places=2)))
+
         return self.get_cache_size()
 
     # compute_cache_size()
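
The progress callback is deliberately argument-free; any state travels through the closure. A hedged sketch of a caller (the real consumer is CleanupJob further down in this branch):

    # 'artifacts' is assumed to be an ArtifactCache instance.
    removed = []

    def progress():
        # Called once per removed ref
        removed.append(True)
        print("removed {} refs so far".format(len(removed)))

    # final_size = artifacts.clean(progress)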
@@ -279,7 +353,14 @@ class ArtifactCache():
     #    (int): The size of the artifact cache.
     #
     def compute_cache_size(self):
-        self._cache_size = self.cas.calculate_cache_size()
+        old_cache_size = self._cache_size
+        new_cache_size = self.cas.calculate_cache_size()
+
+        if old_cache_size != new_cache_size:
+            self._cache_size = new_cache_size
+
+            usage = ArtifactCacheUsage(self)
+            self._message(MessageType.STATUS, "Cache usage recomputed: {}".format(usage))
 
         return self._cache_size
 
@@ -307,7 +388,7 @@ class ArtifactCache():
     # it is greater than the actual cache size.
     #
     # Returns:
-    #     (int) An approximation of the artifact cache size.
+    #     (int) An approximation of the artifact cache size, in bytes.
     #
     def get_cache_size(self):
 
@@ -848,19 +929,16 @@ class ArtifactCache():
         else:
             headroom = 2e9
 
-        artifactdir_volume = self.context.artifactdir
-        while not os.path.exists(artifactdir_volume):
-            artifactdir_volume = os.path.dirname(artifactdir_volume)
-
         try:
-            cache_quota = utils._parse_size(self.context.config_cache_quota, artifactdir_volume)
+            cache_quota = utils._parse_size(self.context.config_cache_quota,
+                                            self.context.artifactdir)
         except utils.UtilError as e:
             raise LoadError(LoadErrorReason.INVALID_DATA,
                             "{}\nPlease specify the value in bytes or as a % of full disk space.\n"
                             "\nValid values are, for example: 800M 10G 1T 50%\n"
                             .format(str(e))) from e
 
-        available_space, total_size = self._get_volume_space_info_for(artifactdir_volume)
+        total_size, available_space = self._get_cache_volume_size()
         cache_size = self.get_cache_size()
 
         # Ensure system has enough storage for the cache_quota
@@ -900,22 +978,25 @@ class ArtifactCache():
         # if we end up writing more than 2G, but hey, this stuff is
         # already really fuzzy.
         #
+        self._cache_quota_original = cache_quota
         self._cache_quota = cache_quota - headroom
         self._cache_lower_threshold = self._cache_quota / 2
 
-    # _get_volume_space_info_for
-    #
-    # Get the available space and total space for the given volume
+    # _get_cache_volume_size()
     #
-    # Args:
-    #     volume: volume for which to get the size
+    # Get the available space and total space for the volume on
+    # which the artifact cache is located.
     #
     # Returns:
-    #     A tuple containing first the availabe number of bytes on the requested
-    #     volume, then the total number of bytes of the volume.
-    def _get_volume_space_info_for(self, volume):
-        stat = os.statvfs(volume)
-        return stat.f_bsize * stat.f_bavail, stat.f_bsize * stat.f_blocks
+    #    (int): The total number of bytes on the volume
+    #    (int): The number of available bytes on the volume
+    #
+    # NOTE: We use this stub to allow the test cases
+    #       to override what an artifact cache thinks
+    #       about it's disk size and available bytes.
+    #
+    def _get_cache_volume_size(self):
+        return utils._get_volume_size(self.context.artifactdir)
 
 
 # _configured_remote_artifact_cache_specs():

=====================================
buildstream/_context.py
=====================================

@@ -30,7 +30,7 @@ from . import _yaml
 from ._exceptions import LoadError, LoadErrorReason, BstError
 from ._message import Message, MessageType
 from ._profile import Topics, profile_start, profile_end
-from ._artifactcache import ArtifactCache
+from ._artifactcache import ArtifactCache, ArtifactCacheUsage
 from ._cas import CASCache
 from ._workspaces import Workspaces, WorkspaceProjectCache, WORKSPACE_PROJECT_FILE
 from .plugin import _plugin_lookup
@@ -289,6 +289,16 @@ class Context():
 
         return self._artifactcache
 
+    # get_artifact_cache_usage()
+    #
+    # Fetches the current usage of the artifact cache
+    #
+    # Returns:
+    #    (ArtifactCacheUsage): The current status
+    #
+    def get_artifact_cache_usage(self):
+        return ArtifactCacheUsage(self.artifactcache)
+
     # add_project():
     #
     # Add a project to the context.
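
A minimal sketch of the intended call pattern, assuming a fully initialized Context:

    # 'context' is assumed to be an initialized Context instance.
    usage = context.get_artifact_cache_usage()
    print("Cache usage: {}".format(usage))              # e.g. "3.2G / 5G (69%)"
    print("Percent used: {}%".format(usage.used_percent))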

=====================================
buildstream/_frontend/app.py
=====================================

@@ -194,11 +194,6 @@ class App():
         except BstError as e:
             self._error_exit(e, "Error instantiating platform")
 
-        try:
-            self.context.artifactcache.preflight()
-        except BstError as e:
-            self._error_exit(e, "Error instantiating artifact cache")
-
         # Create the logger right before setting the message handler
         self.logger = LogLine(self.context,
                               self._content_profile,
@@ -211,6 +206,13 @@ class App():
         # Propagate pipeline feedback to the user
         self.context.set_message_handler(self._message_handler)
 
+        # Preflight the artifact cache after initializing logging,
+        # this can cause messages to be emitted.
+        try:
+            self.context.artifactcache.preflight()
+        except BstError as e:
+            self._error_exit(e, "Error instantiating artifact cache")
+
         #
         # Load the Project
         #

=====================================
buildstream/_frontend/status.py
=====================================

@@ -353,13 +353,17 @@ class _StatusHeader():
     def render(self, line_length, elapsed):
         project = self._context.get_toplevel_project()
         line_length = max(line_length, 80)
-        size = 0
-        text = ''
 
+        #
+        # Line 1: Session time, project name, session / total elements
+        #
+        #  ========= 00:00:00 project-name (143/387) =========
+        #
         session = str(len(self._stream.session_elements))
         total = str(len(self._stream.total_elements))
 
-        # Format and calculate size for target and overall time code
+        size = 0
+        text = ''
         size += len(total) + len(session) + 4  # Size for (N/N) with a leading space
         size += 8  # Size of time code
         size += len(project.name) + 1
@@ -372,6 +376,12 @@ class _StatusHeader():
             self._format_profile.fmt(')')
 
         line1 = self._centered(text, size, line_length, '=')
+
+        #
+        # Line 2: Dynamic list of queue status reports
+        #
+        #  (Fetched:0 117 0)→ (Built:4 0 0)
+        #
         size = 0
         text = ''
 
@@ -389,10 +399,28 @@ class _StatusHeader():
 
         line2 = self._centered(text, size, line_length, ' ')
 
-        size = 24
-        text = self._format_profile.fmt("~~~~~ ") + \
-            self._content_profile.fmt('Active Tasks') + \
-            self._format_profile.fmt(" ~~~~~")
+        #
+        # Line 3: Cache usage percentage report
+        #
+        #  ~~~~~~ cache: 69% ~~~~~~
+        #
+        usage = self._context.get_artifact_cache_usage()
+        usage_percent = '{}%'.format(usage.used_percent)
+
+        size = 21
+        size += len(usage_percent)
+        if usage.used_percent >= 95:
+            formatted_usage_percent = self._error_profile.fmt(usage_percent)
+        elif usage.used_percent >= 80:
+            formatted_usage_percent = self._content_profile.fmt(usage_percent)
+        else:
+            formatted_usage_percent = self._success_profile.fmt(usage_percent)
+
+        text = self._format_profile.fmt("~~~~~~ ") + \
+            self._content_profile.fmt('cache') + \
+            self._format_profile.fmt(': ') + \
+            formatted_usage_percent + \
+            self._format_profile.fmt(' ~~~~~~')
         line3 = self._centered(text, size, line_length, ' ')
 
         return line1 + '\n' + line2 + '\n' + line3
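
The line-3 coloring is a simple two-threshold scheme; a standalone sketch of the selection logic, with illustrative style names standing in for the profile objects:

    def pick_style(used_percent):
        # Mirrors the thresholds above: alarming at >= 95%, highlighted
        # at >= 80%, otherwise rendered with the 'success' profile.
        if used_percent >= 95:
            return 'error'
        elif used_percent >= 80:
            return 'content'
        return 'success'

    assert pick_style(69) == 'success'
    assert pick_style(85) == 'content'
    assert pick_style(97) == 'error'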

=====================================
buildstream/_frontend/widget.py
=====================================

@@ -175,29 +175,22 @@ class TypeName(Widget):
 # A widget for displaying the Element name
 class ElementName(Widget):
 
-    def __init__(self, context, content_profile, format_profile):
-        super(ElementName, self).__init__(context, content_profile, format_profile)
-
-        # Pre initialization format string, before we know the length of
-        # element names in the pipeline
-        self._fmt_string = '{: <30}'
-
     def render(self, message):
+        action_name = message.action_name
         element_id = message.task_id or message.unique_id
-        if element_id is None:
-            return ""
-
-        plugin = _plugin_lookup(element_id)
-        name = plugin._get_full_name()
+        if element_id is not None:
+            plugin = _plugin_lookup(element_id)
+            name = plugin._get_full_name()
+            name = '{: <30}'.format(name)
+        else:
+            name = 'core activity'
+            name = '{: <30}'.format(name)
 
-        # Sneak the action name in with the element name
-        action_name = message.action_name
         if not action_name:
             action_name = "Main"
 
         return self.content_profile.fmt("{: >5}".format(action_name.lower())) + \
-            self.format_profile.fmt(':') + \
-            self.content_profile.fmt(self._fmt_string.format(name))
+            self.format_profile.fmt(':') + self.content_profile.fmt(name)
 
 
 # A widget for displaying the primary message text
@@ -219,9 +212,12 @@ class CacheKey(Widget):
     def render(self, message):
 
         element_id = message.task_id or message.unique_id
-        if element_id is None or not self._key_length:
+        if not self._key_length:
             return ""
 
+        if element_id is None:
+            return ' ' * self._key_length
+
         missing = False
         key = ' ' * self._key_length
         plugin = _plugin_lookup(element_id)
@@ -456,6 +452,7 @@ class LogLine(Widget):
         values["Session Start"] = starttime.strftime('%A, %d-%m-%Y at %H:%M:%S')
         values["Project"] = "{} ({})".format(project.name, project.directory)
         values["Targets"] = ", ".join([t.name for t in stream.targets])
+        values["Cache Usage"] = "{}".format(context.get_artifact_cache_usage())
         text += self._format_values(values)
 
         # User configurations

=====================================
buildstream/_scheduler/jobs/cleanupjob.py
=====================================

@@ -28,7 +28,18 @@ class CleanupJob(Job):
         self._artifacts = context.artifactcache
 
     def child_process(self):
-        return self._artifacts.clean()
+        def progress():
+            self.send_message('update-cache-size',
+                              self._artifacts.get_cache_size())
+        return self._artifacts.clean(progress)
+
+    def handle_message(self, message_type, message):
+
+        # Update the cache size in the main process as we go,
+        # this provides better feedback in the UI.
+        if message_type == 'update-cache-size':
+            self._artifacts.set_cache_size(message)
+            return True
 
     def parent_complete(self, status, result):
         if status == JobStatus.OK:

=====================================
buildstream/_scheduler/jobs/job.py
=====================================

@@ -58,10 +58,10 @@ class JobStatus():
 
 
 # Used to distinguish between status messages and return values
-class Envelope():
+class _Envelope():
     def __init__(self, message_type, message):
-        self._message_type = message_type
-        self._message = message
+        self.message_type = message_type
+        self.message = message
 
 
 # Process class that doesn't call waitpid on its own.
@@ -275,10 +275,37 @@ class Job():
     def set_task_id(self, task_id):
         self._task_id = task_id
 
+    # send_message()
+    #
+    # To be called from inside Job.child_process() implementations
+    # to send messages to the main process during processing.
+    #
+    # These messages will be processed by the class's Job.handle_message()
+    # implementation.
+    #
+    def send_message(self, message_type, message):
+        self._queue.put(_Envelope(message_type, message))
+
     #######################################################
     #                  Abstract Methods                   #
     #######################################################
 
+    # handle_message()
+    #
+    # Handle a custom message. This will be called in the main process in
+    # response to any messages sent to the main proces using the
+    # Job.send_message() API from inside a Job.child_process() implementation
+    #
+    # Args:
+    #    message_type (str): A string to identify the message type
+    #    message (any): A simple serializable object
+    #
+    # Returns:
+    #    (bool): Should return a truthy value if message_type is handled.
+    #
+    def handle_message(self, message_type, message):
+        return False
+
     # parent_complete()
     #
     # This will be executed after the job finishes, and is expected to
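
Taken together, send_message() and handle_message() form a small child-to-parent protocol: the child enqueues an _Envelope, and the parent's envelope dispatcher hands any unrecognized message type to the subclass. A hedged sketch of a hypothetical subclass, assuming the Job base class from this file (CleanupJob above is the real consumer in this branch):

    # Hypothetical Job subclass, for illustration only.
    class ExampleJob(Job):

        def child_process(self):
            # Runs in the child process; report intermediate progress
            # to the main process as work proceeds.
            for step in range(3):
                self.send_message('progress', step)
            return 'done'

        def handle_message(self, message_type, message):
            # Runs in the main process for message types the base
            # class does not already consume.
            if message_type == 'progress':
                print("child reported step {}".format(message))
                return True
            return False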
@@ -416,7 +443,7 @@ class Job():
                              elapsed=elapsed, detail=e.detail,
                              logfile=filename, sandbox=e.sandbox)
 
-            self._queue.put(Envelope('child_data', self.child_process_data()))
+            self._queue.put(_Envelope('child_data', self.child_process_data()))
 
             # Report the exception to the parent (for internal testing purposes)
             self._child_send_error(e)
@@ -442,7 +469,7 @@ class Job():
 
         else:
             # No exception occurred in the action
-            self._queue.put(Envelope('child_data', self.child_process_data()))
+            self._queue.put(_Envelope('child_data', self.child_process_data()))
             self._child_send_result(result)
 
         elapsed = datetime.datetime.now() - starttime
@@ -469,7 +496,7 @@ class Job():
         domain = e.domain
         reason = e.reason
 
-        envelope = Envelope('error', {
+        envelope = _Envelope('error', {
             'domain': domain,
             'reason': reason
         })
@@ -487,7 +514,7 @@ class Job():
     #
     def _child_send_result(self, result):
         if result is not None:
-            envelope = Envelope('result', result)
+            envelope = _Envelope('result', result)
             self._queue.put(envelope)
 
     # _child_shutdown()
@@ -524,7 +551,7 @@ class Job():
         if message.message_type == MessageType.LOG:
             return
 
-        self._queue.put(Envelope('message', message))
+        self._queue.put(_Envelope('message', message))
 
     # _parent_shutdown()
     #
@@ -588,24 +615,28 @@ class Job():
         if not self._listening:
             return
 
-        if envelope._message_type == 'message':
+        if envelope.message_type == 'message':
             # Propagate received messages from children
             # back through the context.
-            self._scheduler.context.message(envelope._message)
-        elif envelope._message_type == 'error':
+            self._scheduler.context.message(envelope.message)
+        elif envelope.message_type == 'error':
             # For regression tests only, save the last error domain / reason
             # reported from a child task in the main process, this global state
             # is currently managed in _exceptions.py
-            set_last_task_error(envelope._message['domain'],
-                                envelope._message['reason'])
-        elif envelope._message_type == 'result':
+            set_last_task_error(envelope.message['domain'],
+                                envelope.message['reason'])
+        elif envelope.message_type == 'result':
             assert self._result is None
-            self._result = envelope._message
-        elif envelope._message_type == 'child_data':
+            self._result = envelope.message
+        elif envelope.message_type == 'child_data':
             # If we retry a job, we assign a new value to this
-            self.child_data = envelope._message
-        else:
-            raise Exception()
+            self.child_data = envelope.message
+
+        # Try Job subclass specific messages now
+        elif not self.handle_message(envelope.message_type,
+                                     envelope.message):
+            assert 0, "Unhandled message type '{}': {}" \
+                .format(envelope.message_type, envelope.message)
 
     # _parent_process_queue()
     #

=====================================
buildstream/_scheduler/scheduler.py
=====================================

@@ -40,8 +40,8 @@ class SchedStatus():
 
 # Some action names for the internal jobs we launch
 #
-_ACTION_NAME_CLEANUP = 'cleanup'
-_ACTION_NAME_CACHE_SIZE = 'cache_size'
+_ACTION_NAME_CLEANUP = 'clean'
+_ACTION_NAME_CACHE_SIZE = 'size'
 
 
 # Scheduler()

=====================================
buildstream/utils.py
=====================================

@@ -633,6 +633,27 @@ def _get_dir_size(path):
     return get_size(path)
 
 
+# _get_volume_size():
+#
+# Gets the overall usage and total size of a mounted filesystem in bytes.
+#
+# Args:
+#    path (str): The path to check
+#
+# Returns:
+#    (int): The total number of bytes on the volume
+#    (int): The number of available bytes on the volume
+#
+def _get_volume_size(path):
+    try:
+        stat_ = os.statvfs(path)
+    except OSError as e:
+        raise UtilError("Failed to retrieve stats on volume for path '{}': {}"
+                        .format(path, e)) from e
+
+    return stat_.f_bsize * stat_.f_blocks, stat_.f_bsize * stat_.f_bavail
+
+
 # _parse_size():
 #
 # Convert a string representing data size to a number of
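
For reference, os.statvfs() reports sizes in blocks: f_blocks is the total block count, f_bavail the blocks available to unprivileged users, and f_bsize the block size used as the multiplier here. A quick standalone check of the same arithmetic:

    import os

    # Same computation as _get_volume_size() above, on the current directory.
    stat_ = os.statvfs('.')
    total_bytes = stat_.f_bsize * stat_.f_blocks   # whole volume, in bytes
    avail_bytes = stat_.f_bsize * stat_.f_bavail   # bytes available to this user
    print(total_bytes, avail_bytes)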
@@ -667,8 +688,7 @@ def _parse_size(size, volume):
     if num > 100:
         raise UtilError("{}% is not a valid percentage value.".format(num))
 
-    stat_ = os.statvfs(volume)
-    disk_size = stat_.f_blocks * stat_.f_bsize
+    disk_size, _ = _get_volume_size(volume)
 
     return disk_size * (num / 100)
 
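
Percentage quotas thus resolve against the total volume size (the first value returned by _get_volume_size()), not the available space. A worked example for a "50%" quota on a 500GB volume:

    disk_size = 500 * 10**9            # total bytes, per _get_volume_size()
    num = 50                           # parsed from a "50%" quota setting
    print(disk_size * (num / 100))     # -> 250000000000.0, i.e. a 250GB quota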

=====================================
tests/artifactcache/cache_size.py
=====================================

@@ -80,7 +80,7 @@ def test_quota_over_1024T(cli, tmpdir):
     _yaml.dump({'name': 'main'}, str(project.join("project.conf")))
 
     volume_space_patch = mock.patch(
-        "buildstream._artifactcache.ArtifactCache._get_volume_space_info_for",
+        "buildstream._artifactcache.ArtifactCache._get_cache_volume_size",
         autospec=True,
         return_value=(1025 * TiB, 1025 * TiB)
     )

=====================================
tests/artifactcache/expiry.py
=====================================

@@ -18,6 +18,7 @@
 #
 
 import os
+import re
 from unittest import mock
 
 import pytest
@@ -66,8 +67,9 @@ def test_artifact_expires(cli, datafiles, tmpdir):
     res.assert_success()
 
     # Check that the correct element remains in the cache
-    assert cli.get_element_state(project, 'target.bst') != 'cached'
-    assert cli.get_element_state(project, 'target2.bst') == 'cached'
+    states = cli.get_element_states(project, ['target.bst', 'target2.bst'])
+    assert states['target.bst'] != 'cached'
+    assert states['target2.bst'] == 'cached'
 
 
 # Ensure that we don't end up deleting the whole cache (or worse) if
@@ -144,9 +146,11 @@ def test_expiry_order(cli, datafiles, tmpdir):
     # have been removed.
     # Note that buildstream will reduce the cache to 50% of the
     # original size - we therefore remove multiple elements.
-
-    assert (tuple(cli.get_element_state(project, element) for element in
-                  ('unrelated.bst', 'target.bst', 'target2.bst', 'dep.bst', 'expire.bst')) ==
+    check_elements = [
+        'unrelated.bst', 'target.bst', 'target2.bst', 'dep.bst', 'expire.bst'
+    ]
+    states = cli.get_element_states(project, check_elements)
+    assert (tuple(states[element] for element in check_elements) ==
             ('buildable', 'buildable', 'buildable', 'cached', 'cached', ))
 
 
@@ -176,8 +180,9 @@ def test_keep_dependencies(cli, datafiles, tmpdir):
     res.assert_success()
 
     # Check that the correct element remains in the cache
-    assert cli.get_element_state(project, 'dependency.bst') == 'cached'
-    assert cli.get_element_state(project, 'unrelated.bst') == 'cached'
+    states = cli.get_element_states(project, ['dependency.bst', 'unrelated.bst'])
+    assert states['dependency.bst'] == 'cached'
+    assert states['unrelated.bst'] == 'cached'
 
     # We try to build an element which depends on the LRU artifact,
     # and could therefore fail if we didn't make sure dependencies
@@ -192,9 +197,10 @@ def test_keep_dependencies(cli, datafiles, tmpdir):
     res = cli.run(project=project, args=['build', 'target.bst'])
     res.assert_success()
 
-    assert cli.get_element_state(project, 'unrelated.bst') != 'cached'
-    assert cli.get_element_state(project, 'dependency.bst') == 'cached'
-    assert cli.get_element_state(project, 'target.bst') == 'cached'
+    states = cli.get_element_states(project, ['target.bst', 'unrelated.bst'])
+    assert states['target.bst'] == 'cached'
+    assert states['dependency.bst'] == 'cached'
+    assert states['unrelated.bst'] != 'cached'
 
 
 # Assert that we never delete a dependency required for a build tree
@@ -239,11 +245,11 @@ def test_never_delete_required(cli, datafiles, tmpdir):
     # life there may potentially be N-builders cached artifacts
     # which exceed the quota
     #
-    assert cli.get_element_state(project, 'dep1.bst') == 'cached'
-    assert cli.get_element_state(project, 'dep2.bst') == 'cached'
-
-    assert cli.get_element_state(project, 'dep3.bst') != 'cached'
-    assert cli.get_element_state(project, 'target.bst') != 'cached'
+    states = cli.get_element_states(project, ['target.bst'])
+    assert states['dep1.bst'] == 'cached'
+    assert states['dep2.bst'] == 'cached'
+    assert states['dep3.bst'] != 'cached'
+    assert states['target.bst'] != 'cached'
 
 
 # Assert that we never delete a dependency required for a build tree,
@@ -275,10 +281,11 @@ def test_never_delete_required_track(cli, datafiles, tmpdir):
     res.assert_success()
 
     # They should all be cached
-    assert cli.get_element_state(project, 'dep1.bst') == 'cached'
-    assert cli.get_element_state(project, 'dep2.bst') == 'cached'
-    assert cli.get_element_state(project, 'dep3.bst') == 'cached'
-    assert cli.get_element_state(project, 'target.bst') == 'cached'
+    states = cli.get_element_states(project, ['target.bst'])
+    assert states['dep1.bst'] == 'cached'
+    assert states['dep2.bst'] == 'cached'
+    assert states['dep3.bst'] == 'cached'
+    assert states['target.bst'] == 'cached'
 
     # Now increase the size of all the elements
     #
@@ -296,10 +303,11 @@ def test_never_delete_required_track(cli, datafiles, tmpdir):
 
     # Expect the same result that we did in test_never_delete_required()
     #
-    assert cli.get_element_state(project, 'dep1.bst') == 'cached'
-    assert cli.get_element_state(project, 'dep2.bst') == 'cached'
-    assert cli.get_element_state(project, 'dep3.bst') != 'cached'
-    assert cli.get_element_state(project, 'target.bst') != 'cached'
+    states = cli.get_element_states(project, ['target.bst'])
+    assert states['dep1.bst'] == 'cached'
+    assert states['dep2.bst'] == 'cached'
+    assert states['dep3.bst'] != 'cached'
+    assert states['target.bst'] != 'cached'
 
 
 # Ensure that only valid cache quotas make it through the loading
@@ -350,9 +358,9 @@ def test_invalid_cache_quota(cli, datafiles, tmpdir, quota, err_domain, err_reas
     total_space = 10000
 
     volume_space_patch = mock.patch(
-        "buildstream._artifactcache.ArtifactCache._get_volume_space_info_for",
+        "buildstream._artifactcache.ArtifactCache._get_cache_volume_size",
         autospec=True,
-        return_value=(free_space, total_space),
+        return_value=(total_space, free_space),
     )
 
     cache_size_patch = mock.patch(
@@ -418,3 +426,66 @@ def test_extract_expiry(cli, datafiles, tmpdir):
 
     assert os.path.isdir(refsdirtarget2)
     assert not os.path.exists(refsdirtarget)
+
+
+# Ensures that when launching BuildStream with a full artifact cache,
+# the cache size and cleanup jobs are run before any other jobs.
+#
+@pytest.mark.datafiles(DATA_DIR)
+def test_cleanup_first(cli, datafiles, tmpdir):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    element_path = 'elements'
+    cache_location = os.path.join(project, 'cache', 'artifacts', 'ostree')
+    checkout = os.path.join(project, 'checkout')
+
+    cli.configure({
+        'cache': {
+            'quota': 10000000,
+        }
+    })
+
+    # Create an element that uses almost the entire cache (an empty
+    # ostree cache starts at about ~10KiB, so we need a bit of a
+    # buffer)
+    create_element_size('target.bst', project, element_path, [], 8000000)
+    res = cli.run(project=project, args=['build', 'target.bst'])
+    res.assert_success()
+
+    assert cli.get_element_state(project, 'target.bst') == 'cached'
+
+    # Now configure with a smaller quota, create a situation
+    # where the cache must be cleaned up before building anything else.
+    #
+    # Fix the fetchers and builders just to ensure a predictable
+    # sequence of events (although it does not effect this test)
+    cli.configure({
+        'cache': {
+            'quota': 5000000,
+        },
+        'scheduler': {
+            'fetchers': 1,
+            'builders': 1
+        }
+    })
+
+    # Our cache is now more than full, BuildStream
+    create_element_size('target2.bst', project, element_path, [], 4000000)
+    res = cli.run(project=project, args=['build', 'target2.bst'])
+    res.assert_success()
+
+    # Find all of the activity (like push, pull, fetch) lines
+    results = re.findall(r'\[.*\]\[.*\]\[\s*(\S+):.*\]\s*START\s*.*\.log', res.stderr)
+
+    # Don't bother checking the order of 'fetch', it is allowed to start
+    # before or after the initial cache size job, runs in parallel, and does
+    # not require ResourceType.CACHE.
+    results.remove('fetch')
+    print(results)
+
+    # Assert the expected sequence of events
+    assert results == ['size', 'clean', 'build']
+
+    # Check that the correct element remains in the cache
+    states = cli.get_element_states(project, ['target.bst', 'target2.bst'])
+    assert states['target.bst'] != 'cached'
+    assert states['target2.bst'] == 'cached'
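
The findall() pattern in test_cleanup_first() captures the action name from each START line. A quick sanity check of the pattern against a fabricated log line shaped like BuildStream's output (the line itself is illustrative, not taken from a real run):

    import re

    line = "[00:00:12][deadbeef][ clean:core activity          ] START   cleanup.log"
    found = re.findall(r'\[.*\]\[.*\]\[\s*(\S+):.*\]\s*START\s*.*\.log', line)
    print(found)  # -> ['clean']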

=====================================
tests/frontend/logging.py
=====================================

@@ -41,7 +41,7 @@ def test_default_logging(cli, tmpdir, datafiles):
     result = cli.run(project=project, args=['source', 'fetch', element_name])
     result.assert_success()
 
-    m = re.search(r"\[\d\d:\d\d:\d\d\]\[\]\[\] SUCCESS Checking sources", result.stderr)
+    m = re.search(r"\[\d\d:\d\d:\d\d\]\[\s*\]\[.*\] SUCCESS Checking sources", result.stderr)
     assert(m is not None)
 
 
@@ -77,7 +77,7 @@ def test_custom_logging(cli, tmpdir, datafiles):
     result = cli.run(project=project, args=['source', 'fetch', element_name])
     result.assert_success()
 
-    m = re.search(r"\d\d:\d\d:\d\d,\d\d:\d\d:\d\d.\d{6},\d\d:\d\d:\d\d,,,SUCCESS,Checking sources", result.stderr)
+    m = re.search(r"\d\d:\d\d:\d\d,\d\d:\d\d:\d\d.\d{6},\d\d:\d\d:\d\d,\s*,.*,SUCCESS,Checking sources", result.stderr)
     assert(m is not None)
 
 
=====================================
tests/frontend/track.py
=====================================

@@ -123,7 +123,7 @@ def test_track_recurse(cli, tmpdir, datafiles, kind, amount):
         last_element_name = element_name
 
     # Assert that a fetch is needed
-    states = cli.get_element_states(project, last_element_name)
+    states = cli.get_element_states(project, [last_element_name])
     for element_name in element_names:
         assert states[element_name] == 'no reference'
 
@@ -143,7 +143,7 @@ def test_track_recurse(cli, tmpdir, datafiles, kind, amount):
     result.assert_success()
 
     # Assert that the base is buildable and the rest are waiting
-    states = cli.get_element_states(project, last_element_name)
+    states = cli.get_element_states(project, [last_element_name])
     for element_name in element_names:
         if element_name == element_names[0]:
             assert states[element_name] == 'buildable'

=====================================
tests/testutils/runcli.py
=====================================

@@ -398,13 +398,12 @@ class Cli():
     #
     # Returns a dictionary with the element names as keys
     #
-    def get_element_states(self, project, target, deps='all'):
+    def get_element_states(self, project, targets, deps='all'):
         result = self.run(project=project, silent=True, args=[
             'show',
             '--deps', deps,
             '--format', '%{name}||%{state}',
-            target
-        ])
+        ] + targets)
         result.assert_success()
         lines = result.output.splitlines()
         states = {}
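
With the list-based signature, one `bst show` invocation can now report several targets at once. A sketch of the intended call, assuming the test suite's usual cli fixture and a project path:

    # 'cli' is assumed to be the Cli fixture, 'project' a project directory.
    states = cli.get_element_states(project, ['target.bst', 'dep.bst'])
    assert states['target.bst'] == 'cached'
    assert states['dep.bst'] == 'cached'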
