[Notes] [Git][BuildStream/buildstream][raoul/802-refactor-artifactcache] 2 commits: artifactcache: Move pull logic into CASRemote




Raoul Hidalgo Charman pushed to branch raoul/802-refactor-artifactcache at BuildStream / buildstream

Commits:

6 changed files:

Changes:

  • buildstream/_artifactcache.py

    @@ -21,6 +21,7 @@ import multiprocessing
     import os
     import string
     from collections.abc import Mapping
    +import grpc

     from .types import _KeyStrength
     from ._exceptions import ArtifactError, CASError, LoadError, LoadErrorReason
    @@ -28,7 +29,7 @@ from ._message import Message, MessageType
     from . import utils
     from . import _yaml

    -from ._cas.casremote import CASRemote, CASRemoteSpec
    +from ._cas.casremote import BlobNotFound, CASRemote, CASRemoteSpec


     CACHE_SIZE_FILE = "cache_size"
    @@ -607,16 +608,37 @@ class ArtifactCache():

             for remote in push_remotes:
                 remote.init()
    +            pushed_remote = False
                 display_key = element._get_brief_display_key()
                 element.status("Pushing artifact {} -> {}".format(display_key, remote.spec.url))

    -            if self.cas.push(refs, remote):
    -                element.info("Pushed artifact {} -> {}".format(display_key, remote.spec.url))
    +            try:
    +                for ref in refs:
    +                    # Check whether ref is already on the server in which case
    +                    # there is no need to push the ref
    +                    root_digest = self.cas.resolve_ref(ref)
    +                    response = remote.get_reference(ref)
    +                    if (response is not None and
    +                            response.hash == root_digest.hash and
    +                            response.size_bytes == root_digest.size_bytes):
    +                        element.info("Remote ({}) already has {} cached".format(
    +                            remote.spec.url, element._get_brief_display_key()))
    +                        continue
    +
    +                    # upload blobs
    +                    self._send_directory(root_digest, remote)
    +
    +                    remote.update_reference(ref, root_digest)
    +                    pushed_remote = True
    +
    +            except grpc.RpcError as e:
    +                if e.code() != grpc.StatusCode.RESOURCE_EXHAUSTED:
    +                    raise CASError("Failed to push ref {}: {}"
    +                                   .format(refs, e), temporary=True) from e
    +
    +            if pushed_remote is True:
                     pushed = True
    -            else:
    -                element.info("Remote ({}) already has {} cached".format(
    -                    remote.spec.url, element._get_brief_display_key()
    -                ))
    +                element.info("Pushed artifact {} -> {}".format(display_key, remote.spec.url))

             return pushed

    @@ -644,19 +666,31 @@ class ArtifactCache():
                     display_key = element._get_brief_display_key()
                     element.status("Pulling artifact {} <- {}".format(display_key, remote.spec.url))

    -                if self.cas.pull(ref, remote, progress=progress, subdir=subdir, excluded_subdirs=excluded_subdirs):
    -                    element.info("Pulled artifact {} <- {}".format(display_key, remote.spec.url))
    -                    if subdir:
    -                        # Attempt to extract subdir into artifact extract dir if it already exists
    -                        # without containing the subdir. If the respective artifact extract dir does not
    -                        # exist a complete extraction will complete.
    -                        self.extract(element, key, subdir)
    -                    # no need to pull from additional remotes
    -                    return True
    -                else:
    +                root_digest = remote.get_reference(ref)
    +
    +                if not root_digest:
                         element.info("Remote ({}) does not have {} cached".format(
    -                        remote.spec.url, element._get_brief_display_key()
    -                    ))
    +                        remote.spec.url, element._get_brief_display_key()))
    +                    continue
    +
    +                try:
    +                    self._fetch_directory(remote, root_digest, excluded_subdirs)
    +                except BlobNotFound:
    +                    element.info("Remote ({}) is missing blobs for {}".format(
    +                        remote.spec.url, element._get_brief_display_key()))
    +                    continue
    +
    +                self.cas.set_ref(ref, root_digest)
    +
    +                if subdir:
    +                    # Attempt to extract subdir into artifact extract dir if it already exists
    +                    # without containing the subdir. If the respective artifact extract dir does not
    +                    # exist a complete extraction will complete.
    +                    self.extract(element, key, subdir)
    +
    +                element.info("Pulled artifact {} <- {}".format(display_key, remote.spec.url))
    +                # no need to pull from additional remotes
    +                return True

                 except CASError as e:
                     raise ArtifactError("Failed to pull artifact {}: {}".format(
    @@ -671,15 +705,26 @@ class ArtifactCache():
         #
         # Args:
         #     project (Project): The current project
    -    #     digest (Digest): The digest of the tree
    +    #     tree_digest (Digest): The digest of the tree
         #
    -    def pull_tree(self, project, digest):
    +    def pull_tree(self, project, tree_digest):
             for remote in self._remotes[project]:
    -            digest = self.cas.pull_tree(remote, digest)
    -
    -            if digest:
    -                # no need to pull from additional remotes
    -                return digest
    +            try:
    +                for blob_digest in remote.yield_tree_digests(tree_digest):
    +                    if self.cas.check_blob(blob_digest):
    +                        continue
    +                    remote.request_blob(blob_digest)
    +                    for blob_file in remote.get_blobs():
    +                        self.cas.add_object(path=blob_file.name, link_directly=True)
    +
    +                # Get the last batch
    +                for blob_file in remote.get_blobs(complete_batch=True):
    +                    self.cas.add_object(path=blob_file.name, link_directly=True)
    +
    +            except BlobNotFound:
    +                continue
    +            else:
    +                return tree_digest

             return None

    @@ -708,7 +753,7 @@ class ArtifactCache():
                 return

             for remote in push_remotes:
    -            self.cas.push_directory(remote, directory)
    +            self._send_directory(directory.ref, remote)

         # push_message():
         #
    @@ -793,6 +838,14 @@ class ArtifactCache():
             with self.context.timed_activity("Initializing remote caches", silent_nested=True):
                 self.initialize_remotes(on_failure=remote_failed)

    +    def _send_directory(self, root_digest, remote):
    +        required_blobs = self.cas.yield_directory_digests(root_digest)
    +        missing_blobs = remote.find_missing_blobs(required_blobs)
    +        for blob in missing_blobs.values():
    +            blob_file = self.cas.objpath(blob)
    +            remote.upload_blob(blob, blob_file, final=True)
    +        remote.send_update_batch()
    +
         # _write_cache_size()
         #
         # Writes the given size of the artifact to the cache's size file
    @@ -917,6 +970,19 @@ class ArtifactCache():
             stat = os.statvfs(volume)
             return stat.f_bsize * stat.f_bavail, stat.f_bsize * stat.f_blocks

    +    def _fetch_directory(self, remote, root_digest, excluded_subdirs):
    +        for blob_digest in remote.yield_directory_digests(
    +                root_digest, excluded_subdirs=excluded_subdirs):
    +            if self.cas.check_blob(blob_digest):
    +                continue
    +            remote.request_blob(blob_digest)
    +            for blob_file in remote.get_blobs():
    +                self.cas.add_object(path=blob_file.name, link_directly=True)
    +
    +        # Request final CAS batch
    +        for blob_file in remote.get_blobs(complete_batch=True):
    +            self.cas.add_object(path=blob_file.name, link_directly=True)
    +

     # _configured_remote_artifact_cache_specs():
     #

  • buildstream/_cas/cascache.py

    @@ -18,23 +18,16 @@
     #        Jürg Billeter <juerg billeter codethink co uk>

     import hashlib
    -import itertools
     import os
     import stat
     import tempfile
    -import uuid
     import contextlib

    -import grpc
    -
     from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
    -from .._protos.buildstream.v2 import buildstream_pb2

     from .. import utils
     from .._exceptions import CASError

    -from .casremote import BlobNotFound, _CASBatchRead, _CASBatchUpdate
    -

     # A CASCache manages a CAS repository as specified in the Remote Execution API.
     #
    @@ -183,73 +176,6 @@ class CASCache():

             return modified, removed, added

    -    # pull():
    -    #
    -    # Pull a ref from a remote repository.
    -    #
    -    # Args:
    -    #     ref (str): The ref to pull
    -    #     remote (CASRemote): The remote repository to pull from
    -    #     progress (callable): The progress callback, if any
    -    #     subdir (str): The optional specific subdir to pull
    -    #     excluded_subdirs (list): The optional list of subdirs to not pull
    -    #
    -    # Returns:
    -    #   (bool): True if pull was successful, False if ref was not available
    -    #
    -    def pull(self, ref, remote, *, progress=None, subdir=None, excluded_subdirs=None):
    -        try:
    -            remote.init()
    -
    -            request = buildstream_pb2.GetReferenceRequest(instance_name=remote.spec.instance_name)
    -            request.key = ref
    -            response = remote.ref_storage.GetReference(request)
    -
    -            tree = remote_execution_pb2.Digest()
    -            tree.hash = response.digest.hash
    -            tree.size_bytes = response.digest.size_bytes
    -
    -            # Check if the element artifact is present, if so just fetch the subdir.
    -            if subdir and os.path.exists(self.objpath(tree)):
    -                self._fetch_subdir(remote, tree, subdir)
    -            else:
    -                # Fetch artifact, excluded_subdirs determined in pullqueue
    -                self._fetch_directory(remote, tree, excluded_subdirs=excluded_subdirs)
    -
    -            self.set_ref(ref, tree)
    -
    -            return True
    -        except grpc.RpcError as e:
    -            if e.code() != grpc.StatusCode.NOT_FOUND:
    -                raise CASError("Failed to pull ref {}: {}".format(ref, e)) from e
    -            else:
    -                return False
    -        except BlobNotFound as e:
    -            return False
    -
    -    # pull_tree():
    -    #
    -    # Pull a single Tree rather than a ref.
    -    # Does not update local refs.
    -    #
    -    # Args:
    -    #     remote (CASRemote): The remote to pull from
    -    #     digest (Digest): The digest of the tree
    -    #
    -    def pull_tree(self, remote, digest):
    -        try:
    -            remote.init()
    -
    -            digest = self._fetch_tree(remote, digest)
    -
    -            return digest
    -
    -        except grpc.RpcError as e:
    -            if e.code() != grpc.StatusCode.NOT_FOUND:
    -                raise
    -
    -        return None
    -
         # link_ref():
         #
         # Add an alias for an existing ref.
    @@ -263,73 +189,6 @@ class CASCache():

             self.set_ref(newref, tree)

    -    # push():
    -    #
    -    # Push committed refs to remote repository.
    -    #
    -    # Args:
    -    #     refs (list): The refs to push
    -    #     remote (CASRemote): The remote to push to
    -    #
    -    # Returns:
    -    #   (bool): True if any remote was updated, False if no pushes were required
    -    #
    -    # Raises:
    -    #   (CASError): if there was an error
    -    #
    -    def push(self, refs, remote):
    -        skipped_remote = True
    -        try:
    -            for ref in refs:
    -                tree = self.resolve_ref(ref)
    -
    -                # Check whether ref is already on the server in which case
    -                # there is no need to push the ref
    -                try:
    -                    request = buildstream_pb2.GetReferenceRequest(instance_name=remote.spec.instance_name)
    -                    request.key = ref
    -                    response = remote.ref_storage.GetReference(request)
    -
    -                    if response.digest.hash == tree.hash and response.digest.size_bytes == tree.size_bytes:
    -                        # ref is already on the server with the same tree
    -                        continue
    -
    -                except grpc.RpcError as e:
    -                    if e.code() != grpc.StatusCode.NOT_FOUND:
    -                        # Intentionally re-raise RpcError for outer except block.
    -                        raise
    -
    -                self._send_directory(remote, tree)
    -
    -                request = buildstream_pb2.UpdateReferenceRequest(instance_name=remote.spec.instance_name)
    -                request.keys.append(ref)
    -                request.digest.hash = tree.hash
    -                request.digest.size_bytes = tree.size_bytes
    -                remote.ref_storage.UpdateReference(request)
    -
    -                skipped_remote = False
    -        except grpc.RpcError as e:
    -            if e.code() != grpc.StatusCode.RESOURCE_EXHAUSTED:
    -                raise CASError("Failed to push ref {}: {}".format(refs, e), temporary=True) from e
    -
    -        return not skipped_remote
    -
    -    # push_directory():
    -    #
    -    # Push the given virtual directory to a remote.
    -    #
    -    # Args:
    -    #     remote (CASRemote): The remote to push to
    -    #     directory (Directory): A virtual directory object to push.
    -    #
    -    # Raises:
    -    #     (CASError): if there was an error
    -    #
    -    def push_directory(self, remote, directory):
    -        remote.init()
    -
    -        self._send_directory(remote, directory.ref)
    -
         # objpath():
         #
         # Return the path of an object based on its digest.
    @@ -591,6 +450,37 @@ class CASCache():
             reachable = set()
             self._reachable_refs_dir(reachable, tree, update_mtime=True)

    +    # Check to see if a blob is in the local CAS
    +    # return None if not
    +    def check_blob(self, digest):
    +        objpath = self.objpath(digest)
    +        if os.path.exists(objpath):
    +            # already in local repository
    +            return objpath
    +        else:
    +            return None
    +
    +    def yield_directory_digests(self, directory_digest):
    +        # parse directory, and recursively add blobs
    +        d = remote_execution_pb2.Digest()
    +        d.hash = directory_digest.hash
    +        d.size_bytes = directory_digest.size_bytes
    +        yield d
    +
    +        directory = remote_execution_pb2.Directory()
    +
    +        with open(self.objpath(directory_digest), 'rb') as f:
    +            directory.ParseFromString(f.read())
    +
    +        for filenode in directory.files:
    +            d = remote_execution_pb2.Digest()
    +            d.hash = filenode.digest.hash
    +            d.size_bytes = filenode.digest.size_bytes
    +            yield d
    +
    +        for dirnode in directory.directories:
    +            yield from self.yield_directory_digests(dirnode.digest)
    +
         ################################################
         #             Local Private Methods            #
         ################################################
    @@ -779,202 +669,3 @@ class CASCache():

             for dirnode in directory.directories:
                 yield from self._required_blobs(dirnode.digest)
    -
    -    # _ensure_blob():
    -    #
    -    # Fetch and add blob if it's not already local.
    -    #
    -    # Args:
    -    #     remote (Remote): The remote to use.
    -    #     digest (Digest): Digest object for the blob to fetch.
    -    #
    -    # Returns:
    -    #     (str): The path of the object
    -    #
    -    def _ensure_blob(self, remote, digest):
    -        objpath = self.objpath(digest)
    -        if os.path.exists(objpath):
    -            # already in local repository
    -            return objpath
    -
    -        with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
    -            remote._fetch_blob(digest, f)
    -
    -            added_digest = self.add_object(path=f.name, link_directly=True)
    -            assert added_digest.hash == digest.hash
    -
    -        return objpath
    -
    -    def _batch_download_complete(self, batch):
    -        for digest, data in batch.send():
    -            with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
    -                f.write(data)
    -                f.flush()
    -
    -                added_digest = self.add_object(path=f.name, link_directly=True)
    -                assert added_digest.hash == digest.hash
    -
    -    # Helper function for _fetch_directory().
    -    def _fetch_directory_batch(self, remote, batch, fetch_queue, fetch_next_queue):
    -        self._batch_download_complete(batch)
    -
    -        # All previously scheduled directories are now locally available,
    -        # move them to the processing queue.
    -        fetch_queue.extend(fetch_next_queue)
    -        fetch_next_queue.clear()
    -        return _CASBatchRead(remote)
    -
    -    # Helper function for _fetch_directory().
    -    def _fetch_directory_node(self, remote, digest, batch, fetch_queue, fetch_next_queue, *, recursive=False):
    -        in_local_cache = os.path.exists(self.objpath(digest))
    -
    -        if in_local_cache:
    -            # Skip download, already in local cache.
    -            pass
    -        elif (digest.size_bytes >= remote.max_batch_total_size_bytes or
    -              not remote.batch_read_supported):
    -            # Too large for batch request, download in independent request.
    -            self._ensure_blob(remote, digest)
    -            in_local_cache = True
    -        else:
    -            if not batch.add(digest):
    -                # Not enough space left in batch request.
    -                # Complete pending batch first.
    -                batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
    -                batch.add(digest)
    -
    -        if recursive:
    -            if in_local_cache:
    -                # Add directory to processing queue.
    -                fetch_queue.append(digest)
    -            else:
    -                # Directory will be available after completing pending batch.
    -                # Add directory to deferred processing queue.
    -                fetch_next_queue.append(digest)
    -
    -        return batch
    -
    -    # _fetch_directory():
    -    #
    -    # Fetches remote directory and adds it to content addressable store.
    -    #
    -    # Fetches files, symbolic links and recursively other directories in
    -    # the remote directory and adds them to the content addressable
    -    # store.
    -    #
    -    # Args:
    -    #     remote (Remote): The remote to use.
    -    #     dir_digest (Digest): Digest object for the directory to fetch.
    -    #     excluded_subdirs (list): The optional list of subdirs to not fetch
    -    #
    -    def _fetch_directory(self, remote, dir_digest, *, excluded_subdirs=None):
    -        fetch_queue = [dir_digest]
    -        fetch_next_queue = []
    -        batch = _CASBatchRead(remote)
    -        if not excluded_subdirs:
    -            excluded_subdirs = []
    -
    -        while len(fetch_queue) + len(fetch_next_queue) > 0:
    -            if not fetch_queue:
    -                batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
    -
    -            dir_digest = fetch_queue.pop(0)
    -
    -            objpath = self._ensure_blob(remote, dir_digest)
    -
    -            directory = remote_execution_pb2.Directory()
    -            with open(objpath, 'rb') as f:
    -                directory.ParseFromString(f.read())
    -
    -            for dirnode in directory.directories:
    -                if dirnode.name not in excluded_subdirs:
    -                    batch = self._fetch_directory_node(remote, dirnode.digest, batch,
    -                                                       fetch_queue, fetch_next_queue, recursive=True)
    -
    -            for filenode in directory.files:
    -                batch = self._fetch_directory_node(remote, filenode.digest, batch,
    -                                                   fetch_queue, fetch_next_queue)
    -
    -        # Fetch final batch
    -        self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
    -
    -    def _fetch_subdir(self, remote, tree, subdir):
    -        subdirdigest = self._get_subdir(tree, subdir)
    -        self._fetch_directory(remote, subdirdigest)
    -
    -    def _fetch_tree(self, remote, digest):
    -        # download but do not store the Tree object
    -        with tempfile.NamedTemporaryFile(dir=self.tmpdir) as out:
    -            remote._fetch_blob(digest, out)
    -
    -            tree = remote_execution_pb2.Tree()
    -
    -            with open(out.name, 'rb') as f:
    -                tree.ParseFromString(f.read())
    -
    -            tree.children.extend([tree.root])
    -            for directory in tree.children:
    -                for filenode in directory.files:
    -                    self._ensure_blob(remote, filenode.digest)
    -
    -                # place directory blob only in final location when we've downloaded
    -                # all referenced blobs to avoid dangling references in the repository
    -                dirbuffer = directory.SerializeToString()
    -                dirdigest = self.add_object(buffer=dirbuffer)
    -                assert dirdigest.size_bytes == len(dirbuffer)
    -
    -        return dirdigest
    -
    -    def _send_directory(self, remote, digest, u_uid=uuid.uuid4()):
    -        required_blobs = self._required_blobs(digest)
    -
    -        missing_blobs = dict()
    -        # Limit size of FindMissingBlobs request
    -        for required_blobs_group in _grouper(required_blobs, 512):
    -            request = remote_execution_pb2.FindMissingBlobsRequest(instance_name=remote.spec.instance_name)
    -
    -            for required_digest in required_blobs_group:
    -                d = request.blob_digests.add()
    -                d.hash = required_digest.hash
    -                d.size_bytes = required_digest.size_bytes
    -
    -            response = remote.cas.FindMissingBlobs(request)
    -            for missing_digest in response.missing_blob_digests:
    -                d = remote_execution_pb2.Digest()
    -                d.hash = missing_digest.hash
    -                d.size_bytes = missing_digest.size_bytes
    -                missing_blobs[d.hash] = d
    -
    -        # Upload any blobs missing on the server
    -        self._send_blobs(remote, missing_blobs.values(), u_uid)
    -
    -    def _send_blobs(self, remote, digests, u_uid=uuid.uuid4()):
    -        batch = _CASBatchUpdate(remote)
    -
    -        for digest in digests:
    -            with open(self.objpath(digest), 'rb') as f:
    -                assert os.fstat(f.fileno()).st_size == digest.size_bytes
    -
    -                if (digest.size_bytes >= remote.max_batch_total_size_bytes or
    -                        not remote.batch_update_supported):
    -                    # Too large for batch request, upload in independent request.
    -                    remote._send_blob(digest, f, u_uid=u_uid)
    -                else:
    -                    if not batch.add(digest, f):
    -                        # Not enough space left in batch request.
    -                        # Complete pending batch first.
    -                        batch.send()
    -                        batch = _CASBatchUpdate(remote)
    -                        batch.add(digest, f)
    -
    -        # Send final batch
    -        batch.send()
    -
    -
    -def _grouper(iterable, n):
    -    while True:
    -        try:
    -            current = next(iterable)
    -        except StopIteration:
    -            return
    -        yield itertools.chain([current], itertools.islice(iterable, n - 1))

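    With pull(), pull_tree(), push() and push_directory() removed, CASCache keeps only the
    local-repository helpers (check_blob(), yield_directory_digests(), add_object(), set_ref()) and all
    network transfer moves into CASRemote. A rough sketch of how the two halves are meant to be
    combined on the pull side (illustration only, not code from these commits; cas and remote are
    assumed to be an existing CASCache and an initialized CASRemote, and pull_ref is a made-up name):

    from buildstream._cas.casremote import BlobNotFound


    def pull_ref(cas, remote, ref, *, excluded_subdirs=None):
        # Returns True if the ref could be fetched from this remote.
        root_digest = remote.get_reference(ref)
        if root_digest is None:
            return False  # remote does not have this ref

        try:
            for digest in remote.yield_directory_digests(
                    root_digest, excluded_subdirs=excluded_subdirs):
                if cas.check_blob(digest):
                    continue  # already in the local repository
                remote.request_blob(digest)
                for blob_file in remote.get_blobs():
                    cas.add_object(path=blob_file.name, link_directly=True)

            # Flush any outstanding batch read request
            for blob_file in remote.get_blobs(complete_batch=True):
                cas.add_object(path=blob_file.name, link_directly=True)
        except BlobNotFound:
            return False  # remote advertises the ref but is missing blobs

        cas.set_ref(ref, root_digest)
        return True
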
  • buildstream/_cas/casremote.py

     from collections import namedtuple
     import io
    +import itertools
     import os
     import multiprocessing
     import signal
    +import tempfile
     from urllib.parse import urlparse
     import uuid

    @@ -96,6 +98,11 @@ class CASRemote():
             self.tmpdir = str(tmpdir)
             os.makedirs(self.tmpdir, exist_ok=True)

    +        self.__tmp_downloads = []  # files in the tmpdir waiting to be added to local caches
    +
    +        self.__batch_read = None
    +        self.__batch_update = None
    +
         def init(self):
             if not self._initialized:
                 url = urlparse(self.spec.url)
    @@ -153,6 +160,7 @@ class CASRemote():
                     request = remote_execution_pb2.BatchReadBlobsRequest()
                     response = self.cas.BatchReadBlobs(request)
                     self.batch_read_supported = True
    +                self.__batch_read = _CASBatchRead(self)
                 except grpc.RpcError as e:
                     if e.code() != grpc.StatusCode.UNIMPLEMENTED:
                         raise
    @@ -163,6 +171,7 @@ class CASRemote():
                     request = remote_execution_pb2.BatchUpdateBlobsRequest()
                     response = self.cas.BatchUpdateBlobs(request)
                     self.batch_update_supported = True
    +                self.__batch_update = _CASBatchUpdate(self)
                 except grpc.RpcError as e:
                     if (e.code() != grpc.StatusCode.UNIMPLEMENTED and
                             e.code() != grpc.StatusCode.PERMISSION_DENIED):
    @@ -259,6 +268,210 @@ class CASRemote():

             return message_digest

    +    # get_reference():
    +    #
    +    # Args:
    +    #    ref (str): The ref to request
    +    #
    +    # Returns:
    +    #    (digest): digest of ref, None if not found
    +    #
    +    def get_reference(self, ref):
    +        try:
    +            self.init()
    +
    +            request = buildstream_pb2.GetReferenceRequest()
    +            request.key = ref
    +            return self.ref_storage.GetReference(request).digest
    +        except grpc.RpcError as e:
    +            if e.code() != grpc.StatusCode.NOT_FOUND:
    +                raise CASError("Failed to find ref {}: {}".format(ref, e)) from e
    +            else:
    +                return None
    +
    +    # update_reference():
    +    #
    +    # Args:
    +    #    ref (str): Reference to update
    +    #    digest (Digest): New digest to update ref with
    +    def update_reference(self, ref, digest):
    +        request = buildstream_pb2.UpdateReferenceRequest()
    +        request.keys.append(ref)
    +        request.digest.hash = digest.hash
    +        request.digest.size_bytes = digest.size_bytes
    +        self.ref_storage.UpdateReference(request)
    +
    +    def get_tree_blob(self, tree_digest):
    +        self.init()
    +        f = tempfile.NamedTemporaryFile(dir=self.tmpdir)
    +        self._fetch_blob(tree_digest, f)
    +
    +        tree = remote_execution_pb2.Tree()
    +        with open(f.name, 'rb') as tmp:
    +            tree.ParseFromString(tmp.read())
    +
    +        return tree
    +
    +    # yield_directory_digests():
    +    #
    +    # Recursively iterates over digests for files, symbolic links and other
    +    # directories starting from a root digest
    +    #
    +    # Args:
    +    #     root_digest (digest): The root_digest to get a tree of
    +    #     progress (callable): The progress callback, if any
    +    #     subdir (str): The optional specific subdir to pull
    +    #     excluded_subdirs (list): The optional list of subdirs to not pull
    +    #
    +    # Returns:
    +    #     (iter digests): recursively iterates over digests contained in root directory
    +    #
    +    def yield_directory_digests(self, root_digest, *, progress=None,
    +                                subdir=None, excluded_subdirs=None):
    +        self.init()
    +
    +        # Fetch artifact, excluded_subdirs determined in pullqueue
    +        if excluded_subdirs is None:
    +            excluded_subdirs = []
    +
    +        # get directory blob
    +        f = tempfile.NamedTemporaryFile(dir=self.tmpdir)
    +        self._fetch_blob(root_digest, f)
    +
    +        directory = remote_execution_pb2.Directory()
    +        with open(f.name, 'rb') as tmp:
    +            directory.ParseFromString(tmp.read())
    +
    +        yield root_digest
    +        for filenode in directory.files:
    +            yield filenode.digest
    +
    +        for dirnode in directory.directories:
    +            if dirnode.name not in excluded_subdirs:
    +                yield from self.yield_directory_digests(dirnode.digest)
    +
    +    # yield_tree_digests():
    +    #
    +    # Fetches a tree file from digests and then iterates over child digests
    +    #
    +    # Args:
    +    #     tree_digest (digest): tree digest
    +    #
    +    # Returns:
    +    #     (iter digests): iterates over digests in tree message
    +    def yield_tree_digests(self, tree_digest):
    +        self.init()
    +
    +        # get tree file
    +        f = tempfile.NamedTemporaryFile(dir=self.tmpdir)
    +        self._fetch_blob(tree_digest, f)
    +        tree = remote_execution_pb2.Tree()
    +        tree.ParseFromString(f.read())
    +
    +        tree.children.extend([tree.root])
    +        for directory in tree.children:
    +            for filenode in directory.files:
    +                yield filenode.digest
    +
    +            # add the directory to downloaded tmp files to be added
    +            f2 = tempfile.NamedTemporaryFile(dir=self.tmpdir)
    +            f2.write(directory.SerializeToString())
    +            self.__tmp_downloads.append(f2)
    +
    +        # Add the tree directory to downloads right at the end
    +        self.__tmp_downloads.append(f)
    +
    +    # request_blob():
    +    #
    +    # Request blob, triggering download depending via bytestream or cas
    +    # BatchReadBlobs depending on size.
    +    #
    +    # Args:
    +    #    digest (Digest): digest of the requested blob
    +    #
    +    def request_blob(self, digest):
    +        if (not self.batch_read_supported or
    +                digest.size_bytes > self.max_batch_total_size_bytes):
    +            f = tempfile.NamedTemporaryFile(dir=self.tmpdir)
    +            self._fetch_blob(digest, f)
    +            self.__tmp_downloads.append(f)
    +        elif self.__batch_read.add(digest) is False:
    +            self._download_batch()
    +            self.__batch_read.add(digest)
    +
    +    # get_blobs():
    +    #
    +    # Yield over downloaded blobs in the tmp file locations, causing the files
    +    # to be deleted once they go out of scope.
    +    #
    +    # Args:
    +    #    complete_batch (bool): download any outstanding batch read request
    +    #
    +    # Returns:
    +    #    iterator over NamedTemporaryFile
    +    def get_blobs(self, complete_batch=False):
    +        # Send read batch request and download
    +        if (complete_batch is True and
    +                self.batch_read_supported is True):
    +            self._download_batch()
    +
    +        while self.__tmp_downloads:
    +            yield self.__tmp_downloads.pop()
    +
    +    # upload_blob():
    +    #
    +    # Push blobs given an iterator over blob files
    +    #
    +    def upload_blob(self, digest, blob_file, u_uid=uuid.uuid4(), final=False):
    +        with open(blob_file, 'rb') as f:
    +            assert os.fstat(f.fileno()).st_size == digest.size_bytes
    +
    +            if (digest.size_bytes >= self.max_batch_total_size_bytes or
    +                    not self.batch_update_supported):
    +                # Too large for batch request, upload in independent request.
    +                self._send_blob(digest, f, u_uid=u_uid)
    +            else:
    +                if self.__batch_update.add(digest, f) is False:
    +                    self.__batch_update.send()
    +                    self.__batch_update = _CASBatchUpdate(self)
    +                    self.__batch_update.add(digest, f)
    +
    +    def send_update_batch(self):
    +        # make sure everything is sent
    +        self.__batch_update.send()
    +        self.__batch_update = _CASBatchUpdate(self)
    +
    +    # find_missing_blobs()
    +    #
    +    # Does FindMissingBlobs request to remote
    +    #
    +    # Args:
    +    #    required_blobs ([Digest]): list of blobs required
    +    #    u_uid (str): uuid4
    +    #
    +    # Returns:
    +    #    (Dict(Digest)): missing blobs
    +    def find_missing_blobs(self, required_blobs, u_uid=uuid.uuid4()):
    +        self.init()
    +        missing_blobs = dict()
    +        # Limit size of FindMissingBlobs request
    +        for required_blobs_group in _grouper(required_blobs, 512):
    +            request = remote_execution_pb2.FindMissingBlobsRequest()
    +
    +            for required_digest in required_blobs_group:
    +                d = request.blob_digests.add()
    +                d.hash = required_digest.hash
    +                d.size_bytes = required_digest.size_bytes
    +
    +            response = self.cas.FindMissingBlobs(request)
    +            for missing_digest in response.missing_blob_digests:
    +                d = remote_execution_pb2.Digest()
    +                d.hash = missing_digest.hash
    +                d.size_bytes = missing_digest.size_bytes
    +                missing_blobs[d.hash] = d
    +
    +        return missing_blobs
    +
         ################################################
         #             Local Private Methods            #
         ################################################
    @@ -301,6 +514,24 @@ class CASRemote():

             assert response.committed_size == digest.size_bytes

    +    def _download_batch(self):
    +        for _, data in self.__batch_read.send():
    +            f = tempfile.NamedTemporaryFile(dir=self.tmpdir)
    +            f.write(data)
    +            f.flush()
    +            self.__tmp_downloads.append(f)
    +
    +        self.__batch_read = _CASBatchRead(self)
    +
    +
    +def _grouper(iterable, n):
    +    while True:
    +        try:
    +            current = next(iterable)
    +        except StopIteration:
    +            return
    +        yield itertools.chain([current], itertools.islice(iterable, n - 1))
    +

     # Represents a batch of blobs queued for fetching.
     #
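    The new download methods follow a queue-and-drain pattern: request_blob() either queues a digest
    into the pending BatchReadBlobs request or, when the blob is too large or batching is unsupported,
    fetches it immediately over the bytestream API; get_blobs() yields whatever temporary files have
    finished downloading, and get_blobs(complete_batch=True) flushes a pending batch first. A
    hypothetical caller (download_blobs, local_cas and digests are names assumed for illustration)
    might drive it like this:

    def download_blobs(remote, local_cas, digests):
        remote.init()

        for digest in digests:
            if local_cas.check_blob(digest):
                continue  # nothing to download

            # Queued into the current batch, or fetched immediately when too
            # large for a BatchReadBlobs request.
            remote.request_blob(digest)

            # Drain whatever has finished so far; the temporary files are
            # deleted once they go out of scope.
            for blob_file in remote.get_blobs():
                local_cas.add_object(path=blob_file.name, link_directly=True)

        # Send the final (possibly partial) batch and drain it too.
        for blob_file in remote.get_blobs(complete_batch=True):
            local_cas.add_object(path=blob_file.name, link_directly=True)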

  • tests/artifactcache/pull.py

    @@ -110,7 +110,7 @@ def test_pull(cli, tmpdir, datafiles):
             # See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details
             process = multiprocessing.Process(target=_queue_wrapper,
                                               args=(_test_pull, queue, user_config_file, project_dir,
    -                                                artifact_dir, 'target.bst', element_key))
    +                                                artifact_dir, tmpdir, 'target.bst', element_key))

             try:
                 # Keep SIGINT blocked in the child process
    @@ -126,14 +126,18 @@ def test_pull(cli, tmpdir, datafiles):
             assert not error
             assert cas.contains(element, element_key)

    +        # Check that the tmp dir is cleared out
    +        assert os.listdir(os.path.join(str(tmpdir), 'cache', 'tmp')) == []

    -def _test_pull(user_config_file, project_dir, artifact_dir,
    +
    +def _test_pull(user_config_file, project_dir, artifact_dir, tmpdir,
                    element_name, element_key, queue):
         # Fake minimal context
         context = Context()
         context.load(config=user_config_file)
         context.artifactdir = artifact_dir
         context.set_message_handler(message_handler)
    +    context.tmpdir = os.path.join(str(tmpdir), 'cache', 'tmp')

         # Load the project manually
         project = Project(project_dir, context)
    @@ -218,7 +222,7 @@ def test_pull_tree(cli, tmpdir, datafiles):
             # See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details
             process = multiprocessing.Process(target=_queue_wrapper,
                                               args=(_test_push_tree, queue, user_config_file, project_dir,
    -                                                artifact_dir, artifact_digest))
    +                                                artifact_dir, tmpdir, artifact_digest))

             try:
                 # Keep SIGINT blocked in the child process
    @@ -239,6 +243,9 @@ def test_pull_tree(cli, tmpdir, datafiles):
             # Assert that we are not cached locally anymore
             assert cli.get_element_state(project_dir, 'target.bst') != 'cached'

    +        # Check that the tmp dir is cleared out
    +        assert os.listdir(os.path.join(str(tmpdir), 'cache', 'tmp')) == []
    +
             tree_digest = remote_execution_pb2.Digest(hash=tree_hash,
                                                       size_bytes=tree_size)

    @@ -246,7 +253,7 @@ def test_pull_tree(cli, tmpdir, datafiles):
             # Use subprocess to avoid creation of gRPC threads in main BuildStream process
             process = multiprocessing.Process(target=_queue_wrapper,
                                               args=(_test_pull_tree, queue, user_config_file, project_dir,
    -                                                artifact_dir, tree_digest))
    +                                                artifact_dir, tmpdir, tree_digest))

             try:
                 # Keep SIGINT blocked in the child process
    @@ -267,13 +274,18 @@ def test_pull_tree(cli, tmpdir, datafiles):
             # Ensure the entire Tree stucture has been pulled
             assert os.path.exists(cas.objpath(directory_digest))

    +        # Check that the tmp dir is cleared out
    +        assert os.listdir(os.path.join(str(tmpdir), 'cache', 'tmp')) == []
    +

    -def _test_push_tree(user_config_file, project_dir, artifact_dir, artifact_digest, queue):
    +def _test_push_tree(user_config_file, project_dir, artifact_dir, tmpdir,
    +                    artifact_digest, queue):
         # Fake minimal context
         context = Context()
         context.load(config=user_config_file)
         context.artifactdir = artifact_dir
         context.set_message_handler(message_handler)
    +    context.tmpdir = os.path.join(str(tmpdir), 'cache', 'tmp')

         # Load the project manually
         project = Project(project_dir, context)
    @@ -304,12 +316,14 @@ def _test_push_tree(user_config_file, project_dir, artifact_dir, artifact_digest
             queue.put("No remote configured")


    -def _test_pull_tree(user_config_file, project_dir, artifact_dir, artifact_digest, queue):
    +def _test_pull_tree(user_config_file, project_dir, artifact_dir, tmpdir,
    +                    artifact_digest, queue):
         # Fake minimal context
         context = Context()
         context.load(config=user_config_file)
         context.artifactdir = artifact_dir
         context.set_message_handler(message_handler)
    +    context.tmpdir = os.path.join(str(tmpdir), 'cache', 'tmp')

         # Load the project manually
         project = Project(project_dir, context)

  • tests/artifactcache/push.py

    @@ -89,7 +89,7 @@ def test_push(cli, tmpdir, datafiles):
             # See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details
             process = multiprocessing.Process(target=_queue_wrapper,
                                               args=(_test_push, queue, user_config_file, project_dir,
    -                                                artifact_dir, 'target.bst', element_key))
    +                                                artifact_dir, tmpdir, 'target.bst', element_key))

             try:
                 # Keep SIGINT blocked in the child process
    @@ -105,14 +105,18 @@ def test_push(cli, tmpdir, datafiles):
             assert not error
             assert share.has_artifact('test', 'target.bst', element_key)

    +        # Check tmpdir for downloads is cleared
    +        assert os.listdir(os.path.join(str(tmpdir), 'cache', 'tmp')) == []

    -def _test_push(user_config_file, project_dir, artifact_dir,
    +
    +def _test_push(user_config_file, project_dir, artifact_dir, tmpdir,
                    element_name, element_key, queue):
         # Fake minimal context
         context = Context()
         context.load(config=user_config_file)
         context.artifactdir = artifact_dir
         context.set_message_handler(message_handler)
    +    context.tmpdir = os.path.join(str(tmpdir), 'cache', 'tmp')

         # Load the project manually
         project = Project(project_dir, context)
    @@ -196,9 +200,10 @@ def test_push_directory(cli, tmpdir, datafiles):
             queue = multiprocessing.Queue()
             # Use subprocess to avoid creation of gRPC threads in main BuildStream process
             # See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details
    -        process = multiprocessing.Process(target=_queue_wrapper,
    -                                          args=(_test_push_directory, queue, user_config_file,
    -                                                project_dir, artifact_dir, artifact_digest))
    +        process = multiprocessing.Process(
    +            target=_queue_wrapper,
    +            args=(_test_push_directory, queue, user_config_file, project_dir,
    +                  artifact_dir, tmpdir, artifact_digest))

             try:
                 # Keep SIGINT blocked in the child process
    @@ -215,13 +220,17 @@ def test_push_directory(cli, tmpdir, datafiles):
             assert artifact_digest.hash == directory_hash
             assert share.has_object(artifact_digest)

    +        assert os.listdir(os.path.join(str(tmpdir), 'cache', 'tmp')) == []

    -def _test_push_directory(user_config_file, project_dir, artifact_dir, artifact_digest, queue):
    +
    +def _test_push_directory(user_config_file, project_dir, artifact_dir, tmpdir,
    +                         artifact_digest, queue):
         # Fake minimal context
         context = Context()
         context.load(config=user_config_file)
         context.artifactdir = artifact_dir
         context.set_message_handler(message_handler)
    +    context.tmpdir = os.path.join(str(tmpdir), 'cache', 'tmp')

         # Load the project manually
         project = Project(project_dir, context)
    @@ -273,7 +282,7 @@ def test_push_message(cli, tmpdir, datafiles):
             # See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details
             process = multiprocessing.Process(target=_queue_wrapper,
                                               args=(_test_push_message, queue, user_config_file,
    -                                                project_dir, artifact_dir))
    +                                                project_dir, artifact_dir, tmpdir))

             try:
                 # Keep SIGINT blocked in the child process
    @@ -291,13 +300,16 @@ def test_push_message(cli, tmpdir, datafiles):
                                                      size_bytes=message_size)
             assert share.has_object(message_digest)

    +        assert os.listdir(os.path.join(str(tmpdir), 'cache', 'tmp')) == []
    +

    -def _test_push_message(user_config_file, project_dir, artifact_dir, queue):
    +def _test_push_message(user_config_file, project_dir, artifact_dir, tmpdir, queue):
         # Fake minimal context
         context = Context()
         context.load(config=user_config_file)
         context.artifactdir = artifact_dir
         context.set_message_handler(message_handler)
    +    context.tmpdir = os.path.join(str(tmpdir), 'cache', 'tmp')

         # Load the project manually
         project = Project(project_dir, context)

  • tests/integration/pullbuildtrees.py

    @@ -79,6 +79,9 @@ def test_pullbuildtrees(cli, tmpdir, datafiles, integration_cache):
             assert os.path.isdir(buildtreedir)
             default_state(cli, tmpdir, share1)

    +        # Check tmpdir for downloads is cleared
    +        assert os.listdir(os.path.join(integration_cache, 'tmp')) == []
    +
             # Pull artifact with pullbuildtrees set in user config, then assert
             # that pulling with the same user config doesn't creates a pull job,
             # or when buildtrees cli flag is set.
    @@ -91,6 +94,9 @@ def test_pullbuildtrees(cli, tmpdir, datafiles, integration_cache):
             assert element_name not in result.get_pulled_elements()
             default_state(cli, tmpdir, share1)

    +        # Check tmpdir for downloads is cleared
    +        assert os.listdir(os.path.join(integration_cache, 'tmp')) == []
    +
             # Pull artifact with default config and buildtrees cli flag set, then assert
             # that pulling with pullbuildtrees set in user config doesn't create a pull
             # job.
    @@ -101,6 +107,9 @@ def test_pullbuildtrees(cli, tmpdir, datafiles, integration_cache):
             assert element_name not in result.get_pulled_elements()
             default_state(cli, tmpdir, share1)

    +        # Check tmpdir for downloads is cleared
    +        assert os.listdir(os.path.join(integration_cache, 'tmp')) == []
    +
             # Assert that a partial build element (not containing a populated buildtree dir)
             # can't be pushed to an artifact share, then assert that a complete build element
             # can be. This will attempt a partial pull from share1 and then a partial push
    


