[Notes] [Git][BuildStream/buildstream][valentindavid/cache_server_fill_up] 11 commits: contributing: add guidance on unit tests



Valentin David pushed to branch valentindavid/cache_server_fill_up at BuildStream / buildstream

Commits:

5 changed files:

  • CONTRIBUTING.rst
  • buildstream/_artifactcache/cascache.py
  • buildstream/_artifactcache/casserver.py
  • tests/frontend/push.py
  • tests/testutils/artifactshare.py

Changes:

  • CONTRIBUTING.rst
    @@ -1547,6 +1547,24 @@ Tests that run a sandbox should be decorated with::
     
     and use the integration cli helper.
     
    +You should first aim to write tests that exercise your changes from the cli.
    +This is so that the testing is end-to-end, and the changes are guaranteed to
    +work for the end-user. The cli is considered stable, and so tests written in
    +terms of it are unlikely to require updating as the internals of the software
    +change over time.
    +
    +It may be impractical to sufficiently examine some changes this way. For
    +example, the number of cases to test and the running time of each test may be
    +too high. It may also be difficult to contrive circumstances to cover every
    +line of the change. If this is the case, next you can consider also writing
    +unit tests that work more directly on the changes.
    +
    +It is important to write unit tests in such a way that they do not break due to
    +changes unrelated to what they are meant to test. For example, if the test
    +relies on a lot of BuildStream internals, a large refactoring will likely
    +require the test to be rewritten. Pure functions that only rely on the Python
    +Standard Library are excellent candidates for unit testing.
    +
     
     Measuring performance
     ---------------------

  • buildstream/_artifactcache/cascache.py
    @@ -25,6 +25,7 @@ import stat
     import tempfile
     import uuid
     import errno
    +import contextlib
     from urllib.parse import urlparse
     
     import grpc

    @@ -43,6 +44,13 @@ from .._exceptions import CASError
     _MAX_PAYLOAD_BYTES = 1024 * 1024
     
     
    +class BlobNotFound(CASError):
    +
    +    def __init__(self, blob, msg):
    +        self.blob = blob
    +        super().__init__(msg)
    +
    +
     # A CASCache manages a CAS repository as specified in the Remote Execution API.
     #
     # Args:

    @@ -219,6 +227,8 @@ class CASCache():
                     raise CASError("Failed to pull ref {}: {}".format(ref, e)) from e
                 else:
                     return False
    +        except BlobNotFound as e:
    +            return False
     
         # pull_tree():
         #

    @@ -391,13 +401,14 @@ class CASCache():
         #     digest (Digest): An optional Digest object to populate
         #     path (str): Path to file to add
         #     buffer (bytes): Byte buffer to add
    +    #     link_directly (bool): Whether file given by path can be linked
         #
         # Returns:
         #     (Digest): The digest of the added object
         #
         # Either `path` or `buffer` must be passed, but not both.
         #
    -    def add_object(self, *, digest=None, path=None, buffer=None):
    +    def add_object(self, *, digest=None, path=None, buffer=None, link_directly=False):
             # Exactly one of the two parameters has to be specified
             assert (path is None) != (buffer is None)
     

    @@ -407,28 +418,34 @@ class CASCache():
             try:
                 h = hashlib.sha256()
                 # Always write out new file to avoid corruption if input file is modified
    -            with tempfile.NamedTemporaryFile(dir=self.tmpdir) as out:
    -                # Set mode bits to 0644
    -                os.chmod(out.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
    -
    -                if path:
    -                    with open(path, 'rb') as f:
    -                        for chunk in iter(lambda: f.read(4096), b""):
    -                            h.update(chunk)
    -                            out.write(chunk)
    +            with contextlib.ExitStack() as stack:
    +                if path is not None and link_directly:
    +                    tmp = stack.enter_context(open(path, 'rb'))
    +                    for chunk in iter(lambda: tmp.read(4096), b""):
    +                        h.update(chunk)
                     else:
    -                    h.update(buffer)
    -                    out.write(buffer)
    +                    tmp = stack.enter_context(tempfile.NamedTemporaryFile(dir=self.tmpdir))
    +                    # Set mode bits to 0644
    +                    os.chmod(tmp.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
     
    -                out.flush()
    +                    if path:
    +                        with open(path, 'rb') as f:
    +                            for chunk in iter(lambda: f.read(4096), b""):
    +                                h.update(chunk)
    +                                tmp.write(chunk)
    +                    else:
    +                        h.update(buffer)
    +                        tmp.write(buffer)
    +
    +                    tmp.flush()
     
                     digest.hash = h.hexdigest()
    -                digest.size_bytes = os.fstat(out.fileno()).st_size
    +                digest.size_bytes = os.fstat(tmp.fileno()).st_size
     
                     # Place file at final location
                     objpath = self.objpath(digest)
                     os.makedirs(os.path.dirname(objpath), exist_ok=True)
    -                os.link(out.name, objpath)
    +                os.link(tmp.name, objpath)
     
             except FileExistsError as e:
                 # We can ignore the failed link() if the object is already in the repo.
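
The add_object() change above is built on contextlib.ExitStack, which lets
the code decide at runtime which context manager to enter: the input file
itself when link_directly permits hard-linking it, or a fresh temporary file
otherwise. A minimal sketch of the pattern (the checksum helper is
illustrative, not BuildStream API):

    import contextlib
    import hashlib
    import tempfile

    def checksum(path=None, buffer=None):
        h = hashlib.sha256()
        with contextlib.ExitStack() as stack:
            if path is not None:
                # Enter the existing file as the managed resource.
                f = stack.enter_context(open(path, 'rb'))
                for chunk in iter(lambda: f.read(4096), b""):
                    h.update(chunk)
            else:
                # Or stage the data in a temporary file instead.
                f = stack.enter_context(tempfile.NamedTemporaryFile())
                f.write(buffer)
                h.update(buffer)
            # Whichever branch ran, `f` is closed when the stack unwinds.
            return h.hexdigest()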
    @@ -526,6 +543,41 @@ class CASCache():
             # first ref of this list will be the file modified earliest.
             return [ref for _, ref in sorted(zip(mtimes, refs))]
     
    +    # list_objects():
    +    #
    +    # List cached objects in Least Recently Modified (LRM) order.
    +    #
    +    # Returns:
    +    #     (list) - A list of objects and timestamps in LRM order
    +    #
    +    def list_objects(self):
    +        objs = []
    +        mtimes = []
    +
    +        for root, _, files in os.walk(os.path.join(self.casdir, 'objects')):
    +            for filename in files:
    +                obj_path = os.path.join(root, filename)
    +                try:
    +                    mtimes.append(os.path.getmtime(obj_path))
    +                except FileNotFoundError:
    +                    pass
    +                else:
    +                    objs.append(obj_path)
    +
    +        # NOTE: Sorted will sort from earliest to latest, thus the
    +        # first element of this list will be the file modified earliest.
    +        return sorted(zip(mtimes, objs))
    +
    +    def clean_up_refs_until(self, time):
    +        ref_heads = os.path.join(self.casdir, 'refs', 'heads')
    +
    +        for root, _, files in os.walk(ref_heads):
    +            for filename in files:
    +                ref_path = os.path.join(root, filename)
    +                # Obtain the mtime (the time a file was last modified)
    +                if os.path.getmtime(ref_path) < time:
    +                    os.unlink(ref_path)
    +
         # remove():
         #
         # Removes the given symbolic ref from the repo.
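
list_objects() leans on Python's tuple comparison: sorting (mtime, path)
pairs orders them by timestamp first, so the head of the returned list is the
least recently modified object. A quick illustration with made-up values:

    mtimes = [1530000300.0, 1530000100.0, 1530000200.0]
    paths = ['objects/aa/x', 'objects/bb/y', 'objects/cc/z']

    # Tuples compare element-wise, so sorting the pairs orders by mtime first.
    lrm = sorted(zip(mtimes, paths))
    assert lrm[0] == (1530000100.0, 'objects/bb/y')   # oldest first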
    @@ -585,6 +637,10 @@ class CASCache():
     
             return pruned
     
    +    def update_tree_mtime(self, tree):
    +        reachable = set()
    +        self._reachable_refs_dir(reachable, tree, update_mtime=True)
    +
         ################################################
         #             Local Private Methods            #
         ################################################

    @@ -729,10 +785,13 @@ class CASCache():
                     a += 1
                     b += 1
     
    -    def _reachable_refs_dir(self, reachable, tree):
    +    def _reachable_refs_dir(self, reachable, tree, update_mtime=False):
             if tree.hash in reachable:
                 return
     
    +        if update_mtime:
    +            os.utime(self.objpath(tree))
    +
             reachable.add(tree.hash)
     
             directory = remote_execution_pb2.Directory()

    @@ -741,10 +800,12 @@ class CASCache():
                 directory.ParseFromString(f.read())
     
             for filenode in directory.files:
    +            if update_mtime:
    +                os.utime(self.objpath(filenode.digest))
                 reachable.add(filenode.digest.hash)
     
             for dirnode in directory.directories:
    -            self._reachable_refs_dir(reachable, dirnode.digest)
    +            self._reachable_refs_dir(reachable, dirnode.digest, update_mtime=update_mtime)
     
         def _required_blobs(self, directory_digest):
             # parse directory, and recursively add blobs

    @@ -798,7 +859,7 @@ class CASCache():
             with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
                 self._fetch_blob(remote, digest, f)
     
    -            added_digest = self.add_object(path=f.name)
    +            added_digest = self.add_object(path=f.name, link_directly=True)
                 assert added_digest.hash == digest.hash
     
             return objpath

    @@ -809,7 +870,7 @@ class CASCache():
                     f.write(data)
                     f.flush()
     
    -                added_digest = self.add_object(path=f.name)
    +                added_digest = self.add_object(path=f.name, link_directly=True)
                     assert added_digest.hash == digest.hash
     
         # Helper function for _fetch_directory().

    @@ -1113,6 +1174,9 @@ class _CASBatchRead():
             batch_response = self._remote.cas.BatchReadBlobs(self._request)
     
             for response in batch_response.responses:
    +            if response.status.code == code_pb2.NOT_FOUND:
    +                raise BlobNotFound(response.digest.hash, "Failed to download blob {}: {}".format(
    +                    response.digest.hash, response.status.code))
                 if response.status.code != code_pb2.OK:
                     raise CASError("Failed to download blob {}: {}".format(
                         response.digest.hash, response.status.code))

  • buildstream/_artifactcache/casserver.py
    @@ -24,6 +24,9 @@ import signal
     import sys
     import tempfile
     import uuid
    +import errno
    +import ctypes
    +import threading
     
     import click
     import grpc

    @@ -31,6 +34,7 @@ import grpc
     from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
     from .._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc
     from .._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc
    +from .._protos.google.rpc import code_pb2
     
     from .._exceptions import CASError
     

    @@ -55,18 +59,22 @@ class ArtifactTooLargeException(Exception):
     #     repo (str): Path to CAS repository
     #     enable_push (bool): Whether to allow blob uploads and artifact updates
     #
    -def create_server(repo, *, enable_push):
    +def create_server(repo, *, enable_push,
    +                  max_head_size=int(10e9),
    +                  min_head_size=int(2e9)):
         cas = CASCache(os.path.abspath(repo))
     
         # Use max_workers default from Python 3.5+
         max_workers = (os.cpu_count() or 1) * 5
         server = grpc.server(futures.ThreadPoolExecutor(max_workers))
     
    +    cache_cleaner = _CacheCleaner(cas, max_head_size, min_head_size)
    +
         bytestream_pb2_grpc.add_ByteStreamServicer_to_server(
    -        _ByteStreamServicer(cas, enable_push=enable_push), server)
    +        _ByteStreamServicer(cas, cache_cleaner, enable_push=enable_push), server)
     
         remote_execution_pb2_grpc.add_ContentAddressableStorageServicer_to_server(
    -        _ContentAddressableStorageServicer(cas, enable_push=enable_push), server)
    +        _ContentAddressableStorageServicer(cas, cache_cleaner, enable_push=enable_push), server)
     
         remote_execution_pb2_grpc.add_CapabilitiesServicer_to_server(
             _CapabilitiesServicer(), server)

    @@ -84,9 +92,19 @@ def create_server(repo, *, enable_push):
     @click.option('--client-certs', help="Public client certificates for TLS (PEM-encoded)")
     @click.option('--enable-push', default=False, is_flag=True,
                   help="Allow clients to upload blobs and update artifact cache")
    +@click.option('--head-room-min', type=click.INT,
    +              help="Disk head room minimum in bytes",
    +              default=2e9)
    +@click.option('--head-room-max', type=click.INT,
    +              help="Disk head room maximum in bytes",
    +              default=10e9)
     @click.argument('repo')
    -def server_main(repo, port, server_key, server_cert, client_certs, enable_push):
    -    server = create_server(repo, enable_push=enable_push)
    +def server_main(repo, port, server_key, server_cert, client_certs, enable_push,
    +                head_room_min, head_room_max):
    +    server = create_server(repo,
    +                           max_head_size=head_room_max,
    +                           min_head_size=head_room_min,
    +                           enable_push=enable_push)
     
         use_tls = bool(server_key)
     
    @@ -127,11 +145,43 @@ def server_main(repo, port, server_key, server_cert, client_certs, enable_push):
             server.stop(0)
     
     
    +class _FallocateCall:
    +
    +    FALLOC_FL_KEEP_SIZE = 1
    +    FALLOC_FL_PUNCH_HOLE = 2
    +    FALLOC_FL_NO_HIDE_STALE = 4
    +    FALLOC_FL_COLLAPSE_RANGE = 8
    +    FALLOC_FL_ZERO_RANGE = 16
    +    FALLOC_FL_INSERT_RANGE = 32
    +    FALLOC_FL_UNSHARE_RANGE = 64
    +
    +    def __init__(self):
    +        self.libc = ctypes.CDLL("libc.so.6", use_errno=True)
    +        try:
    +            self.fallocate64 = self.libc.fallocate64
    +        except AttributeError:
    +            self.fallocate = self.libc.fallocate
    +
    +    def __call__(self, fd, mode, offset, length):
    +        if hasattr(self, 'fallocate64'):
    +            ret = self.fallocate64(ctypes.c_int(fd), ctypes.c_int(mode),
    +                                   ctypes.c_int64(offset), ctypes.c_int64(length))
    +        else:
    +            ret = self.fallocate(ctypes.c_int(fd), ctypes.c_int(mode),
    +                                 ctypes.c_int(offset), ctypes.c_int(length))
    +        if ret == -1:
    +            err = ctypes.get_errno()
    +            raise OSError(err, os.strerror(err))
    +        return ret
    +
    +
     class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
    -    def __init__(self, cas, *, enable_push):
    +    def __init__(self, cas, cache_cleaner, *, enable_push):
             super().__init__()
             self.cas = cas
             self.enable_push = enable_push
    +        self.fallocate = _FallocateCall()
    +        self.cache_cleaner = cache_cleaner
     
         def Read(self, request, context):
             resource_name = request.resource_name
    @@ -189,17 +239,34 @@ class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
                             context.set_code(grpc.StatusCode.NOT_FOUND)
                             return response
     
    -                    try:
    -                        _clean_up_cache(self.cas, client_digest.size_bytes)
    -                    except ArtifactTooLargeException as e:
    -                        context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
    -                        context.set_details(str(e))
    -                        return response
    +                    while True:
    +                        if client_digest.size_bytes == 0:
    +                            break
    +                        try:
    +                            self.cache_cleaner.clean_up(client_digest.size_bytes)
    +                        except ArtifactTooLargeException as e:
    +                            context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
    +                            context.set_details(str(e))
    +                            return response
    +
    +                        try:
    +                            self.fallocate(out.fileno(), 0, 0, client_digest.size_bytes)
    +                            break
    +                        except OSError as e:
    +                            # Multiple uploads can happen at the same time
    +                            if e.errno != errno.ENOSPC:
    +                                raise
    +
                     elif request.resource_name:
                         # If it is set on subsequent calls, it **must** match the value of the first request.
                         if request.resource_name != resource_name:
                             context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
                             return response
    +
    +                if (offset + len(request.data)) > client_digest.size_bytes:
    +                    context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
    +                    return response
    +
                     out.write(request.data)
                     offset += len(request.data)
                     if request.finish_write:
    @@ -207,7 +274,7 @@ class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
                             context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
                             return response
                         out.flush()
    -                    digest = self.cas.add_object(path=out.name)
    +                    digest = self.cas.add_object(path=out.name, link_directly=True)
                         if digest.hash != client_digest.hash:
                             context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
                             return response

    @@ -220,18 +287,26 @@ class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
     
     
     class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddressableStorageServicer):
    -    def __init__(self, cas, *, enable_push):
    +    def __init__(self, cas, cache_cleaner, *, enable_push):
             super().__init__()
             self.cas = cas
             self.enable_push = enable_push
    +        self.cache_cleaner = cache_cleaner
     
         def FindMissingBlobs(self, request, context):
             response = remote_execution_pb2.FindMissingBlobsResponse()
             for digest in request.blob_digests:
    -            if not _has_object(self.cas, digest):
    -                d = response.missing_blob_digests.add()
    -                d.hash = digest.hash
    -                d.size_bytes = digest.size_bytes
    +            objpath = self.cas.objpath(digest)
    +            try:
    +                os.utime(objpath)
    +            except OSError as e:
    +                if e.errno != errno.ENOENT:
    +                    raise
    +                else:
    +                    d = response.missing_blob_digests.add()
    +                    d.hash = digest.hash
    +                    d.size_bytes = digest.size_bytes
    +
             return response
     
         def BatchReadBlobs(self, request, context):
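
FindMissingBlobs now touches each blob it finds with os.utime() instead of
merely testing existence, so blobs a client is about to rely on are refreshed
in the LRM ordering and not swept by a concurrent cleanup. The EAFP shape of
that check, reduced to a helper (illustrative, not BuildStream API):

    import errno
    import os

    def touch_if_present(path):
        try:
            os.utime(path)          # refresh mtime so the object is not evicted
            return True
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise               # unexpected failure: propagate
            return False            # genuinely missing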
    @@ -250,12 +325,12 @@ class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddressableStorageServicer):
                 try:
                     with open(self.cas.objpath(digest), 'rb') as f:
                         if os.fstat(f.fileno()).st_size != digest.size_bytes:
    -                        blob_response.status.code = grpc.StatusCode.NOT_FOUND
    +                        blob_response.status.code = code_pb2.NOT_FOUND
                             continue
     
                         blob_response.data = f.read(digest.size_bytes)
                 except FileNotFoundError:
    -                blob_response.status.code = grpc.StatusCode.NOT_FOUND
    +                blob_response.status.code = code_pb2.NOT_FOUND
     
             return response
     

    @@ -285,7 +360,7 @@ class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddressableStorageServicer):
                     continue
     
                 try:
    -                _clean_up_cache(self.cas, digest.size_bytes)
    +                self.cache_cleaner.clean_up(digest.size_bytes)
     
                     with tempfile.NamedTemporaryFile(dir=self.cas.tmpdir) as out:
                         out.write(blob_request.data)

    @@ -328,6 +403,12 @@ class _ReferenceStorageServicer(buildstream_pb2_grpc.ReferenceStorageServicer):
     
             try:
                 tree = self.cas.resolve_ref(request.key, update_mtime=True)
    +            try:
    +                self.cas.update_tree_mtime(tree)
    +            except FileNotFoundError:
    +                self.cas.remove(request.key, defer_prune=True)
    +                context.set_code(grpc.StatusCode.NOT_FOUND)
    +                return response
     
                 response.digest.hash = tree.hash
                 response.digest.size_bytes = tree.size_bytes
    @@ -400,60 +481,79 @@ def _digest_from_upload_resource_name(resource_name):
             return None
     
     
    -def _has_object(cas, digest):
    -    objpath = cas.objpath(digest)
    -    return os.path.exists(objpath)
    +class _CacheCleaner:
     
    +    __cleanup_cache_lock = threading.Lock()
     
    -# _clean_up_cache()
    -#
    -# Keep removing Least Recently Pushed (LRP) artifacts in a cache until there
    -# is enough space for the incoming artifact
    -#
    -# Args:
    -#   cas: CASCache object
    -#   object_size: The size of the object being received in bytes
    -#
    -# Returns:
    -#   int: The total bytes removed on the filesystem
    -#
    -def _clean_up_cache(cas, object_size):
    -    # Determine the available disk space, in bytes, of the file system
    -    # which mounts the repo
    -    stats = os.statvfs(cas.casdir)
    -    buffer_ = int(2e9)                # Add a 2 GB buffer
    -    free_disk_space = (stats.f_bfree * stats.f_bsize) - buffer_
    -    total_disk_space = (stats.f_blocks * stats.f_bsize) - buffer_
    -
    -    if object_size > total_disk_space:
    -        raise ArtifactTooLargeException("Artifact of size: {} is too large for "
    -                                        "the filesystem which mounts the remote "
    -                                        "cache".format(object_size))
    -
    -    if object_size <= free_disk_space:
    -        # No need to clean up
    -        return 0
    -
    -    # obtain a list of LRP artifacts
    -    LRP_artifacts = cas.list_refs()
    -
    -    removed_size = 0  # in bytes
    -    while object_size - removed_size > free_disk_space:
    -        try:
    -            to_remove = LRP_artifacts.pop(0)  # The first element in the list is the LRP artifact
    -        except IndexError:
    -            # This exception is caught if there are no more artifacts in the list
    -            # LRP_artifacts. This means the artifact is too large for the filesystem
    -            # so we abort the process
    -            raise ArtifactTooLargeException("Artifact of size {} is too large for "
    -                                            "the filesystem which mounts the remote "
    -                                            "cache".format(object_size))
    +    def __init__(self, cas, max_head_size, min_head_size=int(2e9)):
    +        self.__cas = cas
    +        self.__max_head_size = max_head_size
    +        self.__min_head_size = min_head_size
     
    -        removed_size += cas.remove(to_remove, defer_prune=False)
    +    def __has_space(self, object_size):
    +        stats = os.statvfs(self.__cas.casdir)
    +        free_disk_space = (stats.f_bavail * stats.f_bsize) - self.__min_head_size
    +        total_disk_space = (stats.f_blocks * stats.f_bsize) - self.__min_head_size
     
    -    if removed_size > 0:
    -        logging.info("Successfully removed {} bytes from the cache".format(removed_size))
    -    else:
    -        logging.info("No artifacts were removed from the cache.")
    +        if object_size > total_disk_space:
    +            raise ArtifactTooLargeException("Artifact of size: {} is too large for "
    +                                            "the filesystem which mounts the remote "
    +                                            "cache".format(object_size))
     
    -    return removed_size
    +        return object_size <= free_disk_space
    +
    +    # clean_up()
    +    #
    +    # Keep removing Least Recently Pushed (LRP) artifacts in a cache until there
    +    # is enough space for the incoming artifact
    +    #
    +    # Args:
    +    #   object_size: The size of the object being received in bytes
    +    #
    +    # Returns:
    +    #   int: The total bytes removed on the filesystem
    +    #
    +    def clean_up(self, object_size):
    +        if self.__has_space(object_size):
    +            return 0
    +
    +        with _CacheCleaner.__cleanup_cache_lock:
    +            if self.__has_space(object_size):
    +                # Another thread has done the cleanup for us
    +                return 0
    +
    +            stats = os.statvfs(self.__cas.casdir)
    +            target_disk_space = (stats.f_bavail * stats.f_bsize) - self.__max_head_size
    +
    +            # obtain a list of LRP objects
    +            LRP_objects = self.__cas.list_objects()
    +
    +            removed_size = 0  # in bytes
    +            last_mtime = 0
    +
    +            while object_size - removed_size > target_disk_space:
    +                try:
    +                    last_mtime, to_remove = LRP_objects.pop(0)  # The first element in the list is the LRP object
    +                except IndexError:
    +                    # This exception is caught if there are no more objects in the list
    +                    # LRP_objects. This means that the artifact is too large for the filesystem
    +                    # so we abort the process
    +                    raise ArtifactTooLargeException("Artifact of size {} is too large for "
    +                                                    "the filesystem which mounts the remote "
    +                                                    "cache".format(object_size))
    +
    +                try:
    +                    size = os.stat(to_remove).st_size
    +                    os.unlink(to_remove)
    +                    removed_size += size
    +                except FileNotFoundError:
    +                    pass
    +
    +            self.__cas.clean_up_refs_until(last_mtime)
    +
    +            if removed_size > 0:
    +                logging.info("Successfully removed {} bytes from the cache".format(removed_size))
    +            else:
    +                logging.info("No artifacts were removed from the cache.")
    +
    +            return removed_size
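
_CacheCleaner.clean_up() is the classic double-checked pattern: an unlocked
fast-path check, then a re-check under the lock, so that concurrent uploads
which queued behind one cleanup do not each run another. Its shape, reduced
to the essentials (names here are illustrative):

    import threading

    class Cleaner:
        _lock = threading.Lock()

        def clean_up(self, needed):
            if self._has_space(needed):      # fast path, no lock taken
                return
            with Cleaner._lock:
                if self._has_space(needed):  # another thread already cleaned up
                    return
                self._evict_until(needed)    # only one thread evicts at a time

        # Placeholders for the real statvfs check and LRM eviction loop.
        def _has_space(self, needed):
            return False

        def _evict_until(self, needed):
            pass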

  • tests/frontend/push.py
    @@ -230,6 +230,8 @@ def test_artifact_expires(cli, datafiles, tmpdir):
         # Create an artifact share (remote artifact cache) in the tmpdir/artifactshare
         # Mock a file system with 12 MB free disk space
         with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'),
    +                               min_head_size=int(2e9),
    +                               max_head_size=int(2e9),
                                    total_space=int(10e9), free_space=(int(12e6) + int(2e9))) as share:
     
             # Configure bst to push to the cache

    @@ -313,6 +315,8 @@ def test_recently_pulled_artifact_does_not_expire(cli, datafiles, tmpdir):
         # Create an artifact share (remote cache) in tmpdir/artifactshare
         # Mock a file system with 12 MB free disk space
         with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'),
    +                               min_head_size=int(2e9),
    +                               max_head_size=int(2e9),
                                    total_space=int(10e9), free_space=(int(12e6) + int(2e9))) as share:
     
             # Configure bst to push to the cache

  • tests/testutils/artifactshare.py
    @@ -29,7 +29,11 @@ from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
     #
     class ArtifactShare():
     
    -    def __init__(self, directory, *, total_space=None, free_space=None):
    +    def __init__(self, directory, *,
    +                 total_space=None,
    +                 free_space=None,
    +                 min_head_size=int(2e9),
    +                 max_head_size=int(10e9)):
     
             # The working directory for the artifact share (in case it
             # needs to do something outside of its backend's storage folder).

    @@ -53,6 +57,9 @@ class ArtifactShare():
             self.total_space = total_space
             self.free_space = free_space
     
    +        self.max_head_size = max_head_size
    +        self.min_head_size = min_head_size
    +
             q = Queue()
     
             self.process = Process(target=self.run, args=(q,))

    @@ -76,7 +83,10 @@ class ArtifactShare():
                     self.free_space = self.total_space
                 os.statvfs = self._mock_statvfs
     
    -        server = create_server(self.repodir, enable_push=True)
    +        server = create_server(self.repodir,
    +                               max_head_size=self.max_head_size,
    +                               min_head_size=self.min_head_size,
    +                               enable_push=True)
             port = server.add_insecure_port('localhost:0')
     
             server.start()

    @@ -134,6 +144,15 @@ class ArtifactShare():
     
             try:
                 tree = self.cas.resolve_ref(artifact_key)
    +            reachable = set()
    +            try:
    +                self.cas._reachable_refs_dir(reachable, tree, update_mtime=False)
    +            except FileNotFoundError:
    +                return False
    +            for digest in reachable:
    +                object_name = os.path.join(self.cas.casdir, 'objects', digest[:2], digest[2:])
    +                if not os.path.exists(object_name):
    +                    return False
                 return True
             except CASError:
                 return False

    @@ -165,8 +184,11 @@ class ArtifactShare():
     # Create an ArtifactShare for use in a test case
     #
     @contextmanager
    -def create_artifact_share(directory, *, total_space=None, free_space=None):
    -    share = ArtifactShare(directory, total_space=total_space, free_space=free_space)
    +def create_artifact_share(directory, *, total_space=None, free_space=None,
    +                          min_head_size=int(2e9),
    +                          max_head_size=int(10e9)):
    +    share = ArtifactShare(directory, total_space=total_space, free_space=free_space,
    +                          min_head_size=min_head_size, max_head_size=max_head_size)
         try:
             yield share
         finally:
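
The test share simulates a small filesystem by replacing os.statvfs in the
server process (visible above as os.statvfs = self._mock_statvfs). A sketch
of what such a mock can look like, assuming the simplest possible stand-in
(field values and block size here are made up):

    import os
    from collections import namedtuple

    _Stat = namedtuple('_Stat', 'f_blocks f_bfree f_bavail f_bsize')

    def _mock_statvfs(path, *, total_space=int(10e9), free_space=int(2e9)):
        block_size = 4096
        return _Stat(f_blocks=total_space // block_size,
                     f_bfree=free_space // block_size,
                     f_bavail=free_space // block_size,
                     f_bsize=block_size)

    os.statvfs = _mock_statvfs   # every statvfs call now reports the mock sizes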


