Raoul Hidalgo Charman pushed to branch raoul/802-refactor-artifactcache at BuildStream / buildstream
Commits:
- 1e51062c by Raoul Hidalgo Charman at 2019-01-03T12:21:58Z
3 changed files:
Changes:
@@ -21,6 +21,7 @@ import multiprocessing
 import os
 import string
 from collections.abc import Mapping
+import grpc

 from .types import _KeyStrength
 from ._exceptions import ArtifactError, CASError, LoadError, LoadErrorReason
@@ -610,13 +611,30 @@ class ArtifactCache():
             display_key = element._get_brief_display_key()
             element.status("Pushing artifact {} -> {}".format(display_key, remote.spec.url))

-            if self.cas.push(refs, remote):
+            try:
+                for ref in refs:
+                    # Check whether ref is already on the server in which case
+                    # there is no need to push the ref
+                    root_digest = self.cas.resolve_ref(ref)
+                    response = remote.get_reference(ref)
+                    if (response is not None and
+                            response.hash == root_digest.hash and
+                            response.size_bytes == root_digest.size_bytes):
+                        element.info("Remote ({}) already has {} cached".format(
+                            remote.spec.url, element._get_brief_display_key()))
+                        continue
+
+                    # upload blobs
+                    self._send_directory(root_digest, remote)
+
+                    remote.update_reference(ref, root_digest)
+
                 element.info("Pushed artifact {} -> {}".format(display_key, remote.spec.url))
                 pushed = True
-            else:
-                element.info("Remote ({}) already has {} cached".format(
-                    remote.spec.url, element._get_brief_display_key()
-                ))
+            except grpc.RpcError as e:
+                if e.code() != grpc.StatusCode.RESOURCE_EXHAUSTED:
+                    raise CASError("Failed to push ref {}: {}"
+                                   .format(refs, e), temporary=True) from e

         return pushed

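The hunk above inlines into ArtifactCache.push() the up-to-date check that used to live in CASCache.push(): fetch the remote's digest for the ref and skip the upload when it already matches the local root digest. As a minimal standalone sketch of that predicate (hypothetical helper name; `response` is the Digest returned by remote.get_reference(), or None for an unknown ref):

    def ref_is_up_to_date(response, root_digest):
        # Up to date only when the remote knows the ref and both the
        # content hash and the size of the root digest match.
        return (response is not None and
                response.hash == root_digest.hash and
                response.size_bytes == root_digest.size_bytes)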
@@ -742,7 +760,7 @@ class ArtifactCache():
             return

         for remote in push_remotes:
-            self.cas.push_directory(remote, directory)
+            self._send_directory(directory.ref, remote)

     # push_message():
     #
@@ -827,6 +845,14 @@ class ArtifactCache():
         with self.context.timed_activity("Initializing remote caches", silent_nested=True):
             self.initialize_remotes(on_failure=remote_failed)

+    def _send_directory(self, root_digest, remote):
+        required_blobs = self.cas.yield_directory_digests(root_digest)
+        missing_blobs = remote.find_missing_blobs(required_blobs)
+        for blob in missing_blobs.values():
+            blob_file = self.cas.objpath(blob)
+            remote.upload_blob(blob, blob_file, final=True)
+        remote.send_update_batch()
+
     # _write_cache_size()
     #
     # Writes the given size of the artifact to the cache's size file
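This new helper is the pivot of the refactor: ArtifactCache now drives the upload protocol itself via the CASRemote methods introduced further down (find_missing_blobs(), upload_blob(), send_update_batch()). A sketch of the per-ref push sequence this yields, assuming `cache` is an ArtifactCache and `remote` an initialised CASRemote:

    root_digest = cache.cas.resolve_ref(ref)    # ref -> root Directory digest
    cache._send_directory(root_digest, remote)  # upload only the missing blobs
    remote.update_reference(ref, root_digest)   # point the remote ref at the tree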
@@ -18,23 +18,16 @@
 #        Jürg Billeter <juerg.billeter@codethink.co.uk>

 import hashlib
-import itertools
 import os
 import stat
 import tempfile
-import uuid
 import contextlib

-import grpc
-
 from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
-from .._protos.buildstream.v2 import buildstream_pb2

 from .. import utils
 from .._exceptions import CASError

-from .casremote import _CASBatchUpdate
-

 # A CASCache manages a CAS repository as specified in the Remote Execution API.
 #
@@ -196,73 +189,6 @@ class CASCache():

         self.set_ref(newref, tree)

-    # push():
-    #
-    # Push committed refs to remote repository.
-    #
-    # Args:
-    #     refs (list): The refs to push
-    #     remote (CASRemote): The remote to push to
-    #
-    # Returns:
-    #   (bool): True if any remote was updated, False if no pushes were required
-    #
-    # Raises:
-    #   (CASError): if there was an error
-    #
-    def push(self, refs, remote):
-        skipped_remote = True
-        try:
-            for ref in refs:
-                tree = self.resolve_ref(ref)
-
-                # Check whether ref is already on the server in which case
-                # there is no need to push the ref
-                try:
-                    request = buildstream_pb2.GetReferenceRequest()
-                    request.key = ref
-                    response = remote.ref_storage.GetReference(request)
-
-                    if response.digest.hash == tree.hash and response.digest.size_bytes == tree.size_bytes:
-                        # ref is already on the server with the same tree
-                        continue
-
-                except grpc.RpcError as e:
-                    if e.code() != grpc.StatusCode.NOT_FOUND:
-                        # Intentionally re-raise RpcError for outer except block.
-                        raise
-
-                self._send_directory(remote, tree)
-
-                request = buildstream_pb2.UpdateReferenceRequest()
-                request.keys.append(ref)
-                request.digest.hash = tree.hash
-                request.digest.size_bytes = tree.size_bytes
-                remote.ref_storage.UpdateReference(request)
-
-                skipped_remote = False
-        except grpc.RpcError as e:
-            if e.code() != grpc.StatusCode.RESOURCE_EXHAUSTED:
-                raise CASError("Failed to push ref {}: {}".format(refs, e), temporary=True) from e
-
-        return not skipped_remote
-
-    # push_directory():
-    #
-    # Push the given virtual directory to a remote.
-    #
-    # Args:
-    #     remote (CASRemote): The remote to push to
-    #     directory (Directory): A virtual directory object to push.
-    #
-    # Raises:
-    #     (CASError): if there was an error
-    #
-    def push_directory(self, remote, directory):
-        remote.init()
-
-        self._send_directory(remote, directory.ref)
-
     # objpath():
     #
     # Return the path of an object based on its digest.
@@ -534,6 +460,27 @@ class CASCache():
         else:
             return None

+    def yield_directory_digests(self, directory_digest):
+        # parse directory, and recursively add blobs
+        d = remote_execution_pb2.Digest()
+        d.hash = directory_digest.hash
+        d.size_bytes = directory_digest.size_bytes
+        yield d
+
+        directory = remote_execution_pb2.Directory()
+
+        with open(self.objpath(directory_digest), 'rb') as f:
+            directory.ParseFromString(f.read())
+
+        for filenode in directory.files:
+            d = remote_execution_pb2.Digest()
+            d.hash = filenode.digest.hash
+            d.size_bytes = filenode.digest.size_bytes
+            yield d
+
+        for dirnode in directory.directories:
+            yield from self.yield_directory_digests(dirnode.digest)
+
     ################################################
     #             Local Private Methods            #
     ################################################
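yield_directory_digests() is the former private _required_blobs() promoted to the public CASCache API (its removal follows below). It walks the Merkle tree top-down as a generator: first the Directory digest itself, then the digests of its files, then each subdirectory recursively. Because it is lazy, CASRemote.find_missing_blobs() can consume it in fixed-size chunks without materialising the whole tree. Hypothetical usage, given a CASCache instance `cas` and a root Directory digest:

    for digest in cas.yield_directory_digests(root_digest):
        print(digest.hash, digest.size_bytes)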
@@ -702,27 +649,6 @@ class CASCache():
         for dirnode in directory.directories:
             self._reachable_refs_dir(reachable, dirnode.digest, update_mtime=update_mtime)

-    def _required_blobs(self, directory_digest):
-        # parse directory, and recursively add blobs
-        d = remote_execution_pb2.Digest()
-        d.hash = directory_digest.hash
-        d.size_bytes = directory_digest.size_bytes
-        yield d
-
-        directory = remote_execution_pb2.Directory()
-
-        with open(self.objpath(directory_digest), 'rb') as f:
-            directory.ParseFromString(f.read())
-
-        for filenode in directory.files:
-            d = remote_execution_pb2.Digest()
-            d.hash = filenode.digest.hash
-            d.size_bytes = filenode.digest.size_bytes
-            yield d
-
-        for dirnode in directory.directories:
-            yield from self._required_blobs(dirnode.digest)
-
     # _ensure_blob():
     #
     # Fetch and add blob if it's not already local.
@@ -747,57 +673,3 @@ class CASCache():
         assert added_digest.hash == digest.hash

         return objpath
-
-    def _send_directory(self, remote, digest, u_uid=uuid.uuid4()):
-        required_blobs = self._required_blobs(digest)
-
-        missing_blobs = dict()
-        # Limit size of FindMissingBlobs request
-        for required_blobs_group in _grouper(required_blobs, 512):
-            request = remote_execution_pb2.FindMissingBlobsRequest()
-
-            for required_digest in required_blobs_group:
-                d = request.blob_digests.add()
-                d.hash = required_digest.hash
-                d.size_bytes = required_digest.size_bytes
-
-            response = remote.cas.FindMissingBlobs(request)
-            for missing_digest in response.missing_blob_digests:
-                d = remote_execution_pb2.Digest()
-                d.hash = missing_digest.hash
-                d.size_bytes = missing_digest.size_bytes
-                missing_blobs[d.hash] = d
-
-        # Upload any blobs missing on the server
-        self._send_blobs(remote, missing_blobs.values(), u_uid)
-
-    def _send_blobs(self, remote, digests, u_uid=uuid.uuid4()):
-        batch = _CASBatchUpdate(remote)
-
-        for digest in digests:
-            with open(self.objpath(digest), 'rb') as f:
-                assert os.fstat(f.fileno()).st_size == digest.size_bytes
-
-                if (digest.size_bytes >= remote.max_batch_total_size_bytes or
-                        not remote.batch_update_supported):
-                    # Too large for batch request, upload in independent request.
-                    remote._send_blob(digest, f, u_uid=u_uid)
-                else:
-                    if not batch.add(digest, f):
-                        # Not enough space left in batch request.
-                        # Complete pending batch first.
-                        batch.send()
-                        batch = _CASBatchUpdate(remote)
-                        batch.add(digest, f)
-
-        # Send final batch
-        batch.send()
-
-
-def _grouper(iterable, n):
-    while True:
-        try:
-            current = next(iterable)
-        except StopIteration:
-            return
-        yield itertools.chain([current], itertools.islice(iterable, n - 1))
@@ -1,5 +1,6 @@
 from collections import namedtuple
 import io
+import itertools
 import os
 import multiprocessing
 import signal
@@ -283,6 +284,18 @@ class CASRemote():
         else:
             return None

+    # update_reference():
+    #
+    # Args:
+    #    ref (str): Reference to update
+    #    digest (Digest): New digest to update ref with
+    def update_reference(self, ref, digest):
+        request = buildstream_pb2.UpdateReferenceRequest()
+        request.keys.append(ref)
+        request.digest.hash = digest.hash
+        request.digest.size_bytes = digest.size_bytes
+        self.ref_storage.UpdateReference(request)
+
     def get_tree_blob(self, tree_digest):
         self.init()
         f = tempfile.NamedTemporaryFile(dir=self.tmpdir)
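update_reference() is the write half of the ref-storage API; its read counterpart get_reference() (whose tail is visible in the context above) returns None for an unknown ref. A hypothetical round trip against an initialised CASRemote `remote`, with an invented ref name purely for illustration:

    remote.update_reference("myproject/element/key", digest)
    stored = remote.get_reference("myproject/element/key")
    assert stored is not None and stored.hash == digest.hash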
@@ -378,6 +391,60 @@ class CASRemote():
         while self.__tmp_downloads:
             yield self.__tmp_downloads.pop()

+    # upload_blob():
+    #
+    # Push blobs given an iterator over blob files
+    #
+    def upload_blob(self, digest, blob_file, u_uid=uuid.uuid4(), final=False):
+        with open(blob_file, 'rb') as f:
+            assert os.fstat(f.fileno()).st_size == digest.size_bytes
+
+            if (digest.size_bytes >= self.max_batch_total_size_bytes or
+                    not self.batch_update_supported):
+                # Too large for batch request, upload in independent request.
+                self._send_blob(digest, f, u_uid=u_uid)
+            else:
+                if self.__batch_update.add(digest, f) is False:
+                    self.__batch_update.send()
+                    self.__batch_update = _CASBatchUpdate(self)
+                    self.__batch_update.add(digest, f)
+
+    def send_update_batch(self):
+        # make sure everything is sent
+        self.__batch_update.send()
+        self.__batch_update = _CASBatchUpdate(self)
+
+    # find_missing_blobs()
+    #
+    # Does FindMissingBlobs request to remote
+    #
+    # Args:
+    #    required_blobs ([Digest]): list of blobs required
+    #    u_uid (str): uuid4
+    #
+    # Returns:
+    #    (Dict(Digest)): missing blobs
+    def find_missing_blobs(self, required_blobs, u_uid=uuid.uuid4()):
+        self.init()
+        missing_blobs = dict()
+        # Limit size of FindMissingBlobs request
+        for required_blobs_group in _grouper(required_blobs, 512):
+            request = remote_execution_pb2.FindMissingBlobsRequest()
+
+            for required_digest in required_blobs_group:
+                d = request.blob_digests.add()
+                d.hash = required_digest.hash
+                d.size_bytes = required_digest.size_bytes
+
+            response = self.cas.FindMissingBlobs(request)
+            for missing_digest in response.missing_blob_digests:
+                d = remote_execution_pb2.Digest()
+                d.hash = missing_digest.hash
+                d.size_bytes = missing_digest.size_bytes
+                missing_blobs[d.hash] = d
+
+        return missing_blobs
+
     ################################################
     #             Local Private Methods            #
     ################################################
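Note the batching contract these methods establish: upload_blob() only queues small blobs in the internal __batch_update buffer, while blobs at or above max_batch_total_size_bytes (or any blob when batch updates are unsupported) bypass it through an individual _send_blob() call. Nothing queued is guaranteed to reach the server until send_update_batch() flushes the buffer. A sketch of the intended call pattern, mirroring ArtifactCache._send_directory() above (`cas` and `remote` are hypothetical instances):

    missing = remote.find_missing_blobs(cas.yield_directory_digests(root_digest))
    for digest in missing.values():
        remote.upload_blob(digest, cas.objpath(digest))
    remote.send_update_batch()   # flush whatever is still queued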
@@ -459,6 +526,15 @@ class CASRemote():
             self.__batch_read = _CASBatchRead(self)


+def _grouper(iterable, n):
+    while True:
+        try:
+            current = next(iterable)
+        except StopIteration:
+            return
+        yield itertools.chain([current], itertools.islice(iterable, n - 1))
+
+
 # Represents a batch of blobs queued for fetching.
 #
 class _CASBatchRead():