[Notes] [Git][BuildStream/buildstream][juerg/command-batching] 48 commits: _platform: Fix get_cpu_count() with cap=None



Jürg Billeter pushed to branch juerg/command-batching at BuildStream / buildstream

Commits:

21 changed files:

Changes:

  • .gitlab-ci.yml
    @@ -145,7 +145,8 @@ docs:
       stage: test
       script:
       - export BST_SOURCE_CACHE="$(pwd)/cache/integration-cache/sources"
    -  - pip3 install sphinx
    +  # Currently sphinx_rtd_theme does not support Sphinx >1.8, this breaks search functionality
    +  - pip3 install sphinx==1.7.9
       - pip3 install sphinx-click
       - pip3 install sphinx_rtd_theme
       - cd dist && ./unpack.sh && cd buildstream

    @@ -161,14 +162,14 @@ docs:
     .overnight-tests: &overnight-tests-template
       stage: test
       variables:
    -    bst_ext_url: git+https://gitlab.com/BuildStream/bst-external.git
    -    bst_ext_ref: 1d6ab71151b93c8cbc0a91a36ffe9270f3b835f1 # 0.5.1
    -    fd_sdk_ref: 88d7c22c2281b987faa02edd57df80d430eecf1f # 18.08.12
    +    BST_EXT_URL: git+https://gitlab.com/BuildStream/bst-external.git
    +    BST_EXT_REF: 1d6ab71151b93c8cbc0a91a36ffe9270f3b835f1 # 0.5.1
    +    FD_SDK_REF: 88d7c22c2281b987faa02edd57df80d430eecf1f # 18.08.11-35-g88d7c22c
       before_script:
       - (cd dist && ./unpack.sh && cd buildstream && pip3 install .)
    -  - pip3 install --user -e ${bst_ext_url}@${bst_ext_ref}#egg=bst_ext
    +  - pip3 install --user -e ${BST_EXT_URL}@${BST_EXT_REF}#egg=bst_ext
       - git clone https://gitlab.com/freedesktop-sdk/freedesktop-sdk.git
    -  - git -C freedesktop-sdk checkout ${fd_sdk_ref}
    +  - git -C freedesktop-sdk checkout ${FD_SDK_REF}
       only:
       - schedules

  • buildstream/_artifactcache/cascache.py
    @@ -506,7 +506,7 @@ class CASCache(ArtifactCache):
         def set_ref(self, ref, tree):
             refpath = self._refpath(ref)
             os.makedirs(os.path.dirname(refpath), exist_ok=True)
    -        with utils.save_file_atomic(refpath, 'wb') as f:
    +        with utils.save_file_atomic(refpath, 'wb', tempdir=self.tmpdir) as f:
                 f.write(tree.SerializeToString())

         # resolve_ref():

    @@ -1048,10 +1048,29 @@ class CASCache(ArtifactCache):
                     missing_blobs[d.hash] = d

             # Upload any blobs missing on the server
    -        for blob_digest in missing_blobs.values():
    -            with open(self.objpath(blob_digest), 'rb') as f:
    -                assert os.fstat(f.fileno()).st_size == blob_digest.size_bytes
    -                self._send_blob(remote, blob_digest, f, u_uid=u_uid)
    +        self._send_blobs(remote, missing_blobs.values(), u_uid)
    +
    +    def _send_blobs(self, remote, digests, u_uid=uuid.uuid4()):
    +        batch = _CASBatchUpdate(remote)
    +
    +        for digest in digests:
    +            with open(self.objpath(digest), 'rb') as f:
    +                assert os.fstat(f.fileno()).st_size == digest.size_bytes
    +
    +                if (digest.size_bytes >= remote.max_batch_total_size_bytes or
    +                        not remote.batch_update_supported):
    +                    # Too large for batch request, upload in independent request.
    +                    self._send_blob(remote, digest, f, u_uid=u_uid)
    +                else:
    +                    if not batch.add(digest, f):
    +                        # Not enough space left in batch request.
    +                        # Complete pending batch first.
    +                        batch.send()
    +                        batch = _CASBatchUpdate(remote)
    +                        batch.add(digest, f)
    +
    +        # Send final batch
    +        batch.send()


     # Represents a single remote CAS cache.

    @@ -1126,6 +1145,17 @@ class _CASRemote():
                     if e.code() != grpc.StatusCode.UNIMPLEMENTED:
                         raise

    +            # Check whether the server supports BatchUpdateBlobs()
    +            self.batch_update_supported = False
    +            try:
    +                request = remote_execution_pb2.BatchUpdateBlobsRequest()
    +                response = self.cas.BatchUpdateBlobs(request)
    +                self.batch_update_supported = True
    +            except grpc.RpcError as e:
    +                if (e.code() != grpc.StatusCode.UNIMPLEMENTED and
    +                        e.code() != grpc.StatusCode.PERMISSION_DENIED):
    +                    raise
    +
                 self._initialized = True

    @@ -1173,6 +1203,46 @@ class _CASBatchRead():
                 yield (response.digest, response.data)


    +# Represents a batch of blobs queued for upload.
    +#
    +class _CASBatchUpdate():
    +    def __init__(self, remote):
    +        self._remote = remote
    +        self._max_total_size_bytes = remote.max_batch_total_size_bytes
    +        self._request = remote_execution_pb2.BatchUpdateBlobsRequest()
    +        self._size = 0
    +        self._sent = False
    +
    +    def add(self, digest, stream):
    +        assert not self._sent
    +
    +        new_batch_size = self._size + digest.size_bytes
    +        if new_batch_size > self._max_total_size_bytes:
    +            # Not enough space left in current batch
    +            return False
    +
    +        blob_request = self._request.requests.add()
    +        blob_request.digest.hash = digest.hash
    +        blob_request.digest.size_bytes = digest.size_bytes
    +        blob_request.data = stream.read(digest.size_bytes)
    +        self._size = new_batch_size
    +        return True
    +
    +    def send(self):
    +        assert not self._sent
    +        self._sent = True
    +
    +        if len(self._request.requests) == 0:
    +            return
    +
    +        batch_response = self._remote.cas.BatchUpdateBlobs(self._request)
    +
    +        for response in batch_response.responses:
    +            if response.status.code != grpc.StatusCode.OK.value[0]:
    +                raise ArtifactError("Failed to upload blob {}: {}".format(
    +                    response.digest.hash, response.status.code))
    +
    +
     def _grouper(iterable, n):
         while True:
             try:

  • buildstream/_artifactcache/casserver.py
    @@ -68,7 +68,7 @@ def create_server(repo, *, enable_push):
             _ByteStreamServicer(artifactcache, enable_push=enable_push), server)

         remote_execution_pb2_grpc.add_ContentAddressableStorageServicer_to_server(
    -        _ContentAddressableStorageServicer(artifactcache), server)
    +        _ContentAddressableStorageServicer(artifactcache, enable_push=enable_push), server)

         remote_execution_pb2_grpc.add_CapabilitiesServicer_to_server(
             _CapabilitiesServicer(), server)

    @@ -222,9 +222,10 @@ class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):


     class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddressableStorageServicer):
    -    def __init__(self, cas):
    +    def __init__(self, cas, *, enable_push):
             super().__init__()
             self.cas = cas
    +        self.enable_push = enable_push

         def FindMissingBlobs(self, request, context):
             response = remote_execution_pb2.FindMissingBlobsResponse()

    @@ -260,6 +261,46 @@ class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddres

             return response

    +    def BatchUpdateBlobs(self, request, context):
    +        response = remote_execution_pb2.BatchUpdateBlobsResponse()
    +
    +        if not self.enable_push:
    +            context.set_code(grpc.StatusCode.PERMISSION_DENIED)
    +            return response
    +
    +        batch_size = 0
    +
    +        for blob_request in request.requests:
    +            digest = blob_request.digest
    +
    +            batch_size += digest.size_bytes
    +            if batch_size > _MAX_PAYLOAD_BYTES:
    +                context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
    +                return response
    +
    +            blob_response = response.responses.add()
    +            blob_response.digest.hash = digest.hash
    +            blob_response.digest.size_bytes = digest.size_bytes
    +
    +            if len(blob_request.data) != digest.size_bytes:
    +                blob_response.status.code = grpc.StatusCode.FAILED_PRECONDITION
    +                continue
    +
    +            try:
    +                _clean_up_cache(self.cas, digest.size_bytes)
    +
    +                with tempfile.NamedTemporaryFile(dir=self.cas.tmpdir) as out:
    +                    out.write(blob_request.data)
    +                    out.flush()
    +                    server_digest = self.cas.add_object(path=out.name)
    +                    if server_digest.hash != digest.hash:
    +                        blob_response.status.code = grpc.StatusCode.FAILED_PRECONDITION
    +
    +            except ArtifactTooLargeException:
    +                blob_response.status.code = grpc.StatusCode.RESOURCE_EXHAUSTED
    +
    +        return response
    +

     class _CapabilitiesServicer(remote_execution_pb2_grpc.CapabilitiesServicer):
         def GetCapabilities(self, request, context):
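
    On the server side, each blob in the batch is validated independently: the
    whole request is rejected if the accumulated payload exceeds
    _MAX_PAYLOAD_BYTES, while size or content mismatches are recorded in a
    per-blob status without failing the rest of the batch. A minimal sketch of
    the per-blob check, assuming SHA-256 digests as used by CAS (the function
    name is hypothetical):

        import hashlib

        # Stand-in for validating one (digest, data) entry of a batch request.
        def verify_blob(declared_hash, declared_size, data):
            if len(data) != declared_size:
                return 'FAILED_PRECONDITION'    # payload length mismatch
            if hashlib.sha256(data).hexdigest() != declared_hash:
                return 'FAILED_PRECONDITION'    # content does not match digest
            return 'OK'

        payload = b'hello'
        print(verify_blob(hashlib.sha256(payload).hexdigest(), len(payload), payload))
        # -> OK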
    

  • buildstream/_platform/darwin.py
    @@ -34,6 +34,9 @@ class Darwin(Platform):
             super().__init__()

         def create_sandbox(self, *args, **kwargs):
    +        kwargs['dummy_reason'] = \
    +            "OSXFUSE is not supported and there are no supported sandbox " + \
    +            "technologies for OSX at this time"
             return SandboxDummy(*args, **kwargs)

         def check_sandbox_config(self, config):

    @@ -41,10 +44,11 @@ class Darwin(Platform):
             return True

         def get_cpu_count(self, cap=None):
    -        if cap < os.cpu_count():
    -            return cap
    +        cpu_count = os.cpu_count()
    +        if cap is None:
    +            return cpu_count
             else:
    -            return os.cpu_count()
    +            return min(cpu_count, cap)

         def set_resource_limits(self, soft_limit=OPEN_MAX, hard_limit=None):
             super().set_resource_limits(soft_limit)

  • buildstream/_platform/linux.py
    @@ -37,24 +37,30 @@ class Linux(Platform):
             self._uid = os.geteuid()
             self._gid = os.getegid()

    +        self._have_fuse = os.path.exists("/dev/fuse")
    +        self._bwrap_exists = _site.check_bwrap_version(0, 0, 0)
    +        self._have_good_bwrap = _site.check_bwrap_version(0, 1, 2)
    +
    +        self._local_sandbox_available = self._have_fuse and self._have_good_bwrap
    +
             self._die_with_parent_available = _site.check_bwrap_version(0, 1, 8)

    -        if self._local_sandbox_available():
    +        if self._local_sandbox_available:
                 self._user_ns_available = self._check_user_ns_available()
             else:
                 self._user_ns_available = False

         def create_sandbox(self, *args, **kwargs):
    -        if not self._local_sandbox_available():
    -            return SandboxDummy(*args, **kwargs)
    +        if not self._local_sandbox_available:
    +            return self._create_dummy_sandbox(*args, **kwargs)
             else:
    -            from ..sandbox._sandboxbwrap import SandboxBwrap
    -            # Inform the bubblewrap sandbox as to whether it can use user namespaces or not
    -            kwargs['user_ns_available'] = self._user_ns_available
    -            kwargs['die_with_parent_available'] = self._die_with_parent_available
    -            return SandboxBwrap(*args, **kwargs)
    +            return self._create_bwrap_sandbox(*args, **kwargs)

         def check_sandbox_config(self, config):
    +        if not self._local_sandbox_available:
    +            # Accept all sandbox configs as it's irrelevant with the dummy sandbox (no Sandbox.run).
    +            return True
    +
             if self._user_ns_available:
                 # User namespace support allows arbitrary build UID/GID settings.
                 return True

    @@ -66,11 +72,26 @@ class Linux(Platform):
         ################################################
         #              Private Methods                 #
         ################################################
    -    def _local_sandbox_available(self):
    -        try:
    -            return os.path.exists(utils.get_host_tool('bwrap')) and os.path.exists('/dev/fuse')
    -        except utils.ProgramNotFoundError:
    -            return False
    +
    +    def _create_dummy_sandbox(self, *args, **kwargs):
    +        reasons = []
    +        if not self._have_fuse:
    +            reasons.append("FUSE is unavailable")
    +        if not self._have_good_bwrap:
    +            if self._bwrap_exists:
    +                reasons.append("`bwrap` is too old (bst needs at least 0.1.2)")
    +            else:
    +                reasons.append("`bwrap` executable not found")
    +
    +        kwargs['dummy_reason'] = " and ".join(reasons)
    +        return SandboxDummy(*args, **kwargs)
    +
    +    def _create_bwrap_sandbox(self, *args, **kwargs):
    +        from ..sandbox._sandboxbwrap import SandboxBwrap
    +        # Inform the bubblewrap sandbox as to whether it can use user namespaces or not
    +        kwargs['user_ns_available'] = self._user_ns_available
    +        kwargs['die_with_parent_available'] = self._die_with_parent_available
    +        return SandboxBwrap(*args, **kwargs)

         def _check_user_ns_available(self):
             # Here, lets check if bwrap is able to create user namespaces,
    

  • buildstream/_platform/platform.py
    @@ -67,7 +67,11 @@ class Platform():
             return cls._instance

         def get_cpu_count(self, cap=None):
    -        return min(len(os.sched_getaffinity(0)), cap)
    +        cpu_count = len(os.sched_getaffinity(0))
    +        if cap is None:
    +            return cpu_count
    +        else:
    +            return min(cpu_count, cap)

         ##################################################################
         #                        Sandbox functions                       #
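
    The fix here and in the Darwin backend is the same: min(n, cap) only makes
    sense when cap is an integer, so the cap=None ("no limit") case must be
    handled before comparing. A quick sketch of the corrected behaviour
    (falling back to os.cpu_count() where sched_getaffinity() is unavailable):

        import os

        def get_cpu_count(cap=None):
            try:
                cpu_count = len(os.sched_getaffinity(0))
            except AttributeError:
                cpu_count = os.cpu_count()      # e.g. on Darwin
            return cpu_count if cap is None else min(cpu_count, cap)

        print(get_cpu_count())       # all available CPUs
        print(get_cpu_count(cap=2))  # capped at 2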
    

  • buildstream/_scheduler/jobs/job.py
    @@ -119,6 +119,8 @@ class Job():
             self._result = None                    # Return value of child action in the parent
             self._tries = 0                        # Try count, for retryable jobs
             self._skipped_flag = False             # Indicate whether the job was skipped.
    +        self._terminated = False               # Whether this job has been explicitly terminated
    +
             # If False, a retry will not be attempted regardless of whether _tries is less than _max_retries.
             #
             self._retry_flag = True

    @@ -190,6 +192,8 @@ class Job():
             # Terminate the process using multiprocessing API pathway
             self._process.terminate()

    +        self._terminated = True
    +
         # terminate_wait()
         #
         # Wait for terminated jobs to complete

    @@ -273,18 +277,22 @@ class Job():
         # running the integration commands).
         #
         # Args:
    -    #     (int): The plugin identifier for this task
    +    #     task_id (int): The plugin identifier for this task
         #
         def set_task_id(self, task_id):
             self._task_id = task_id

         # skipped
         #
    +    # This will evaluate to True if the job was skipped
    +    # during processing, or if it was forcefully terminated.
    +    #
         # Returns:
    -    #    bool: True if the job was skipped while processing.
    +    #    (bool): Whether the job should appear as skipped
    +    #
         @property
         def skipped(self):
    -        return self._skipped_flag
    +        return self._skipped_flag or self._terminated

         #######################################################
         #                  Abstract Methods                   #

  • buildstream/_scheduler/queues/queue.py
    ... ... @@ -326,16 +326,20 @@ class Queue():
    326 326
                               detail=traceback.format_exc())
    
    327 327
                 self.failed_elements.append(element)
    
    328 328
             else:
    
    329
    -
    
    330
    -            # No exception occured, handle the success/failure state in the normal way
    
    331 329
                 #
    
    330
    +            # No exception occured in post processing
    
    331
    +            #
    
    332
    +
    
    333
    +            # All jobs get placed on the done queue for later processing.
    
    332 334
                 self._done_queue.append(job)
    
    333 335
     
    
    334
    -            if success:
    
    335
    -                if not job.skipped:
    
    336
    -                    self.processed_elements.append(element)
    
    337
    -                else:
    
    338
    -                    self.skipped_elements.append(element)
    
    336
    +            # A Job can be skipped whether or not it has failed,
    
    337
    +            # we want to only bookkeep them as processed or failed
    
    338
    +            # if they are not skipped.
    
    339
    +            if job.skipped:
    
    340
    +                self.skipped_elements.append(element)
    
    341
    +            elif success:
    
    342
    +                self.processed_elements.append(element)
    
    339 343
                 else:
    
    340 344
                     self.failed_elements.append(element)
    
    341 345
     
    

  • buildstream/_scheduler/scheduler.py
    @@ -387,6 +387,15 @@ class Scheduler():
         # A loop registered event callback for keyboard interrupts
         #
         def _interrupt_event(self):
    +
    +        # FIXME: This should not be needed, but for some reason we receive an
    +        #        additional SIGINT event when the user hits ^C a second time
    +        #        to inform us that they really intend to terminate; even though
    +        #        we have disconnected our handlers at this time.
    +        #
    +        if self.terminated:
    +            return
    +
             # Leave this to the frontend to decide, if no
             # interrrupt callback was specified, then just terminate.
             if self._interrupt_callback:

  • buildstream/_site.py
    @@ -78,18 +78,12 @@ def check_bwrap_version(major, minor, patch):
             if not bwrap_path:
                 return False
             cmd = [bwrap_path, "--version"]
    -        version = str(subprocess.check_output(cmd).split()[1], "utf-8")
    +        try:
    +            version = str(subprocess.check_output(cmd).split()[1], "utf-8")
    +        except subprocess.CalledProcessError:
    +            # Failure trying to run bubblewrap
    +            return False
             _bwrap_major, _bwrap_minor, _bwrap_patch = map(int, version.split("."))

         # Check whether the installed version meets the requirements
    -    if _bwrap_major > major:
    -        return True
    -    elif _bwrap_major < major:
    -        return False
    -    else:
    -        if _bwrap_minor > minor:
    -            return True
    -        elif _bwrap_minor < minor:
    -            return False
    -        else:
    -            return _bwrap_patch >= patch
    +    return (_bwrap_major, _bwrap_minor, _bwrap_patch) >= (major, minor, patch)
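
    The eleven-line if/elif ladder collapses to one expression because Python
    compares tuples lexicographically, element by element:

        print((0, 1, 8) >= (0, 1, 2))   # True:  0.1.8 satisfies a 0.1.2 requirement
        print((0, 1, 1) >= (0, 1, 2))   # False: patch version too old
        print((1, 0, 0) >= (0, 9, 9))   # True:  higher major wins outright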

  • buildstream/buildelement.py
    @@ -186,7 +186,9 @@ class BuildElement(Element):

                 with self.timed_activity("Running {}".format(command_name)):
                     for cmd in commands:
    -                    self.__run_command(sandbox, cmd, command_name)
    +                    self.__queue_command(sandbox, cmd, command_name)
    +
    +        sandbox.run_queue(SandboxFlags.ROOT_READ_ONLY)

             # %{install-root}/%{build-root} should normally not be written
             # to - if an element later attempts to stage to a location

    @@ -210,7 +212,7 @@ class BuildElement(Element):
             if commands:
                 with self.timed_activity("Running configure-commands"):
                     for cmd in commands:
    -                    self.__run_command(sandbox, cmd, 'configure-commands')
    +                    self.__queue_command(sandbox, cmd, 'configure-commands')

         def generate_script(self):
             script = ""

    @@ -235,14 +237,18 @@ class BuildElement(Element):

             return commands

    -    def __run_command(self, sandbox, cmd, cmd_name):
    -        self.status("Running {}".format(cmd_name), detail=cmd)
    +    def __queue_command(self, sandbox, cmd, cmd_name):
    +        def start_cb():
    +            self.status("Running {}".format(cmd_name), detail=cmd)
    +
    +        def complete_cb(exitcode):
    +            if exitcode != 0:
    +                raise ElementError("Command '{}' failed with exitcode {}".format(cmd, exitcode),
    +                                   collect=self.get_variable('install-root'))

             # Note the -e switch to 'sh' means to exit with an error
             # if any untested command fails.
             #
    -        exitcode = sandbox.run(['sh', '-c', '-e', cmd + '\n'],
    -                               SandboxFlags.ROOT_READ_ONLY)
    -        if exitcode != 0:
    -            raise ElementError("Command '{}' failed with exitcode {}".format(cmd, exitcode),
    -                               collect=self.get_variable('install-root'))
    +        sandbox.queue(['sh', '-c', '-e', cmd + '\n'],
    +                      start_callback=start_cb,
    +                      complete_callback=complete_cb)

  • buildstream/element.py
    @@ -212,7 +212,7 @@ class Element(Plugin):
             self.__staged_sources_directory = None  # Location where Element.stage_sources() was called
             self.__tainted = None                   # Whether the artifact is tainted and should not be shared
             self.__required = False                 # Whether the artifact is required in the current session
    -        self.__build_result = None              # The result of assembling this Element
    +        self.__build_result = None              # The result of assembling this Element (success, description, detail)
             self._build_log_path = None            # The path of the build log for this Element

             # hash tables of loaded artifact metadata, hashed by key

    @@ -767,6 +767,8 @@ class Element(Plugin):
             bstdata = self.get_public_data('bst')
             environment = self.get_environment()

    +        # TODO support command batching
    +
             if bstdata is not None:
                 commands = self.node_get_member(bstdata, list, 'integration-commands', [])
                 for i in range(len(commands)):

    @@ -1379,10 +1381,10 @@ class Element(Plugin):
                 if not vdirectory.is_empty():
                     raise ElementError("Staging directory '{}' is not empty".format(vdirectory))

    -            # While mkdtemp is advertised as using the TMP environment variable, it
    -            # doesn't, so this explicit extraction is necesasry.
    -            tmp_prefix = os.environ.get("TMP", None)
    -            temp_staging_directory = tempfile.mkdtemp(prefix=tmp_prefix)
    +            # It's advantageous to have this temporary directory on
    +            # the same filing system as the rest of our cache.
    +            temp_staging_location = os.path.join(self._get_context().artifactdir, "staging_temp")
    +            temp_staging_directory = tempfile.mkdtemp(prefix=temp_staging_location)

                 try:
                     workspace = self._get_workspace()

    @@ -1479,11 +1481,13 @@ class Element(Plugin):

             self._update_state()

    -        if self._get_workspace() and self._cached():
    +        if self._get_workspace() and self._cached_success():
    +            assert utils._is_main_process(), \
    +                "Attempted to save workspace configuration from child process"
                 #
                 # Note that this block can only happen in the
    -            # main process, since `self._cached()` cannot
    -            # be true when assembly is completed in the task.
    +            # main process, since `self._cached_success()` cannot
    +            # be true when assembly is successful in the task.
                 #
                 # For this reason, it is safe to update and
                 # save the workspaces configuration

    @@ -2083,7 +2087,15 @@ class Element(Plugin):
                 self.prepare(sandbox)

                 if workspace:
    -                workspace.prepared = True
    +                def mark_workspace_prepared():
    +                    workspace.prepared = True
    +
    +                if sandbox._has_commands_queued():
    +                    # Defer workspace.prepared setting until the queued
    +                    # prepare commands have been executed.
    +                    sandbox.queue(None, start_callback=mark_workspace_prepared)
    +                else:
    +                    mark_workspace_prepared()

         def __is_cached(self, keystrength):
             if keystrength is None:
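
    Passing an absolute path as prefix= makes tempfile.mkdtemp() create the
    staging directory under the artifact cache rather than in the system temp
    dir, keeping staging on the same filesystem as the rest of the cache (no
    cross-device moves). For illustration, with stand-in paths:

        import os
        import tempfile

        artifactdir = tempfile.mkdtemp()    # stand-in for context.artifactdir
        staging_prefix = os.path.join(artifactdir, 'staging_temp')
        staging = tempfile.mkdtemp(prefix=staging_prefix)

        print(staging)                      # <artifactdir>/staging_temp<random>
        assert staging.startswith(staging_prefix)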
    

  • buildstream/plugins/sources/git.py
    @@ -184,10 +184,18 @@ class GitMirror(SourceFetcher):
                              cwd=self.mirror)

         def fetch(self, alias_override=None):
    -        self.ensure(alias_override)
    -        if not self.has_ref():
    -            self._fetch(alias_override)
    -        self.assert_ref()
    +        # Resolve the URL for the message
    +        resolved_url = self.source.translate_url(self.url,
    +                                                 alias_override=alias_override,
    +                                                 primary=self.primary)
    +
    +        with self.source.timed_activity("Fetching from {}"
    +                                        .format(resolved_url),
    +                                        silent_nested=True):
    +            self.ensure(alias_override)
    +            if not self.has_ref():
    +                self._fetch(alias_override)
    +            self.assert_ref()

         def has_ref(self):
             if not self.ref:
    

  • buildstream/sandbox/_sandboxdummy.py
    @@ -23,6 +23,7 @@ from . import Sandbox
     class SandboxDummy(Sandbox):
         def __init__(self, *args, **kwargs):
             super().__init__(*args, **kwargs)
    +        self._reason = kwargs.get("dummy_reason", "no reason given")

         def run(self, command, flags, *, cwd=None, env=None):

    @@ -37,4 +38,4 @@ class SandboxDummy(Sandbox):
                                    "'{}'".format(command[0]),
                                    reason='missing-command')

    -        raise SandboxError("This platform does not support local builds")
    +        raise SandboxError("This platform does not support local builds: {}".format(self._reason))

  • buildstream/sandbox/_sandboxremote.py
    @@ -19,6 +19,7 @@
     #        Jim MacArthur <jim macarthur codethink co uk>

     import os
    +import shlex
     from urllib.parse import urlparse

     import grpc

    @@ -160,6 +161,8 @@ class SandboxRemote(Sandbox):
             self._set_virtual_directory(new_dir)

         def run(self, command, flags, *, cwd=None, env=None):
    +        stdout, stderr = self._get_output()
    +
             # Upload sources
             upload_vdir = self.get_virtual_directory()

    @@ -177,15 +180,11 @@ class SandboxRemote(Sandbox):
             if not cascache.verify_digest_pushed(self._get_project(), upload_vdir.ref):
                 raise SandboxError("Failed to verify that source has been pushed to the remote artifact cache.")

    -        # Set up environment and working directory
    -        if cwd is None:
    -            cwd = self._get_work_directory()
    -
    -        if cwd is None:
    -            cwd = '/'
    -
    -        if env is None:
    -            env = self._get_environment()
    +        # Fallback to the sandbox default settings for
    +        # the cwd and env.
    +        #
    +        cwd = self._get_work_directory(cwd=cwd)
    +        env = self._get_environment(cwd=cwd, env=env)

             # We want command args as a list of strings
             if isinstance(command, str):

    @@ -217,13 +216,54 @@ class SandboxRemote(Sandbox):

             action_result = execution_response.result

    +        if stdout:
    +            if action_result.stdout_raw:
    +                stdout.write(str(action_result.stdout_raw, 'utf-8', errors='ignore'))
    +        if stderr:
    +            if action_result.stderr_raw:
    +                stderr.write(str(action_result.stderr_raw, 'utf-8', errors='ignore'))
    +
             if action_result.exit_code != 0:
                 # A normal error during the build: the remote execution system
                 # has worked correctly but the command failed.
    -            # action_result.stdout and action_result.stderr also contains
    -            # build command outputs which we ignore at the moment.
                 return action_result.exit_code

             self.process_job_output(action_result.output_directories, action_result.output_files)

             return 0
    +
    +    def run_queue(self, flags, *, cwd=None, env=None):
    +        queue = self._queue
    +        self._queue = []
    +
    +        script = ""
    +        i = 0
    +        for entry in queue:
    +            if entry.command:
    +                cmdline = ' '.join(shlex.quote(cmd) for cmd in entry.command)
    +                script += "({})\n".format(cmdline)
    +                script += "RETVAL=$?\n"
    +                script += "if [ $RETVAL -ne 0 ] ; then\n"
    +                # Report failing command and exit code to stderr (and then back to client)
    +                script += "  echo -e '\nbst-command-failure:' {} $RETVAL >&2\n".format(i)
    +                script += "  exit 1\n"
    +                script += "fi\n"
    +            i += 1
    +
    +        exit_code = self.run(['sh', '-c', script], flags, cwd=cwd, env=env)
    +
    +        if exit_code != 0:
    +            # TODO get failed command and exit code from stderr
    +            failed_command = 0
    +            command_exit_code = 1
    +
    +        i = 0
    +        for entry in queue:
    +            entry.start_callback()
    +            if exit_code == 0 or i < failed_command:
    +                # Command succeeded
    +                entry.complete_callback(0)
    +            else:
    +                # Command failed
    +                entry.complete_callback(command_exit_code)
    +                break
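
    For illustration, run_queue() above turns the queued commands into a single
    wrapper script, so the whole batch costs one remote execution round trip. A
    rough sketch of the generation loop and its output for a hypothetical
    two-command queue (echo flags simplified):

        import shlex

        queue = [['make'], ['make', 'install']]

        script = ""
        for i, command in enumerate(queue):
            cmdline = ' '.join(shlex.quote(arg) for arg in command)
            script += "({})\n".format(cmdline)
            script += "RETVAL=$?\n"
            script += "if [ $RETVAL -ne 0 ] ; then\n"
            script += "  echo 'bst-command-failure:' {} $RETVAL >&2\n".format(i)
            script += "  exit 1\n"
            script += "fi\n"

        print(script)
        # (make)
        # RETVAL=$?
        # if [ $RETVAL -ne 0 ] ; then
        #   echo 'bst-command-failure:' 0 $RETVAL >&2
        #   exit 1
        # fi
        # (make install)
        # ... and likewise for index 1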

  • buildstream/sandbox/sandbox.py
    @@ -29,6 +29,8 @@ See also: :ref:`sandboxing`.
     """

     import os
    +from collections import namedtuple
    +
     from .._exceptions import ImplError, BstError
     from ..storage._filebaseddirectory import FileBasedDirectory
     from ..storage._casbaseddirectory import CasBasedDirectory

    @@ -114,6 +116,9 @@ class Sandbox():
             # directory via get_directory.
             self._never_cache_vdirs = False

    +        # Queued commands
    +        self._queue = []
    +
         def get_directory(self):
             """Fetches the sandbox root directory

    @@ -228,6 +233,56 @@ class Sandbox():
             raise ImplError("Sandbox of type '{}' does not implement run()"
                             .format(type(self).__name__))

    +    def queue(self, command, *, start_callback=None, complete_callback=None):
    +        """Queue a command to be run in the sandbox.
    +
    +        If the command fails, commands queued later will not be executed.
    +        The callbacks are not guaranteed to be invoked in real time.
    +
    +        Args:
    +            command (list): The command to run in the sandboxed environment, as a list
    +                            of strings starting with the binary to run.
    +            start_callback (callable): Called when the command starts.
    +            complete_callback (callable): Called when the command completes
    +                                          with the exit code as argument.
    +        """
    +        entry = namedtuple('QueueEntry', ['command', 'start_callback', 'complete_callback'])
    +        entry.command = command
    +        entry.start_callback = start_callback
    +        entry.complete_callback = complete_callback
    +        self._queue.append(entry)
    +
    +    def run_queue(self, flags, *, cwd=None, env=None):
    +        """Run the queued commands in the sandbox.
    +
    +        Args:
    +            flags (:class:`.SandboxFlags`): The flags for running this command.
    +            cwd (str): The sandbox relative working directory in which to run the command.
    +            env (dict): A dictionary of string key, value pairs to set as environment
    +                        variables inside the sandbox environment.
    +
    +        Raises:
    +            (:class:`.ProgramNotFoundError`): If a host tool which the given sandbox
    +                                              implementation requires is not found.
    +
    +        .. note::
    +
    +           The optional *cwd* argument will default to the value set with
    +           :func:`~buildstream.sandbox.Sandbox.set_work_directory`
    +        """
    +        queue = self._queue
    +        self._queue = []
    +
    +        for entry in queue:
    +            if entry.start_callback:
    +                entry.start_callback()
    +            if entry.command:
    +                exit_code = self.run(entry.command, flags, cwd=cwd, env=env)
    +            if entry.complete_callback:
    +                entry.complete_callback(exit_code)
    +            if exit_code != 0:
    +                break
    +
         ################################################
         #               Private methods                #
         ################################################

    @@ -375,3 +430,12 @@ class Sandbox():
                     return True

             return False
    +
    +    # _has_commands_queued()
    +    #
    +    # Returns whether the sandbox has a non-empty queue of pending commands.
    +    #
    +    # Returns:
    +    #    (bool): Whether the command queue is non-empty.
    +    def _has_commands_queued(self):
    +        return len(self._queue) > 0
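
    Taken together, queue() and run_queue() let element plugins batch their
    shell commands: each command carries optional callbacks, and the batch runs
    in order, stopping at the first failure. A hypothetical plugin-side usage
    sketch (the sandbox object and SandboxFlags import are assumed to exist):

        def on_start():
            print("Running configure")

        def on_complete(exitcode):
            if exitcode != 0:
                raise RuntimeError("configure failed with exit code {}".format(exitcode))

        sandbox.queue(['sh', '-c', '-e', './configure\n'],
                      start_callback=on_start,
                      complete_callback=on_complete)
        sandbox.queue(['sh', '-c', '-e', 'make\n'])

        # One sandbox invocation (or one remote round trip) for the whole batch:
        sandbox.run_queue(SandboxFlags.ROOT_READ_ONLY)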

  • buildstream/source.py
    @@ -965,28 +965,48 @@ class Source(Plugin):
         # Tries to call fetch for every mirror, stopping once it succeeds
         def __do_fetch(self, **kwargs):
             project = self._get_project()
    -        source_fetchers = self.get_source_fetchers()
    +        context = self._get_context()
    +
    +        # Silence the STATUS messages which might happen as a result
    +        # of checking the source fetchers.
    +        with context.silence():
    +            source_fetchers = self.get_source_fetchers()

             # Use the source fetchers if they are provided
             #
             if source_fetchers:
    -            for fetcher in source_fetchers:
    -                alias = fetcher._get_alias()
    -                for uri in project.get_alias_uris(alias, first_pass=self.__first_pass):
    -                    try:
    -                        fetcher.fetch(uri)
    -                    # FIXME: Need to consider temporary vs. permanent failures,
    -                    #        and how this works with retries.
    -                    except BstError as e:
    -                        last_error = e
    -                        continue
    -
    -                    # No error, we're done with this fetcher
    -                    break

    -                else:
    -                    # No break occurred, raise the last detected error
    -                    raise last_error
    +            # Use a contorted loop here, this is to allow us to
    +            # silence the messages which can result from consuming
    +            # the items of source_fetchers, if it happens to be a generator.
    +            #
    +            source_fetchers = iter(source_fetchers)
    +            try:
    +
    +                while True:
    +
    +                    with context.silence():
    +                        fetcher = next(source_fetchers)
    +
    +                    alias = fetcher._get_alias()
    +                    for uri in project.get_alias_uris(alias, first_pass=self.__first_pass):
    +                        try:
    +                            fetcher.fetch(uri)
    +                        # FIXME: Need to consider temporary vs. permanent failures,
    +                        #        and how this works with retries.
    +                        except BstError as e:
    +                            last_error = e
    +                            continue
    +
    +                        # No error, we're done with this fetcher
    +                        break
    +
    +                    else:
    +                        # No break occurred, raise the last detected error
    +                        raise last_error
    +
    +            except StopIteration:
    +                pass

             # Default codepath is to reinstantiate the Source
             #
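
    The inner for/else is what drives the mirror fallback: Python only runs the
    else branch when the loop finished without break, i.e. when every URI
    failed. A tiny self-contained demonstration of that control flow:

        def fetch(uri):
            if 'good' not in uri:
                raise OSError("cannot reach {}".format(uri))

        def try_uris(uris):
            last_error = None
            for uri in uris:
                try:
                    fetch(uri)
                except OSError as e:
                    last_error = e
                    continue
                break                   # success: stop trying mirrors
            else:
                raise last_error        # no break occurred: every mirror failed

        try_uris(['http://bad.example/repo', 'http://good.example/repo'])  # ok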
    

  • buildstream/utils.py
    @@ -502,7 +502,7 @@ def get_bst_version():

     @contextmanager
     def save_file_atomic(filename, mode='w', *, buffering=-1, encoding=None,
    -                     errors=None, newline=None, closefd=True, opener=None):
    +                     errors=None, newline=None, closefd=True, opener=None, tempdir=None):
         """Save a file with a temporary name and rename it into place when ready.

         This is a context manager which is meant for saving data to files.

    @@ -529,8 +529,9 @@ def save_file_atomic(filename, mode='w', *, buffering=-1, encoding=None,
         # https://bugs.python.org/issue8604

         assert os.path.isabs(filename), "The utils.save_file_atomic() parameter ``filename`` must be an absolute path"
    -    dirname = os.path.dirname(filename)
    -    fd, tempname = tempfile.mkstemp(dir=dirname)
    +    if tempdir is None:
    +        tempdir = os.path.dirname(filename)
    +    fd, tempname = tempfile.mkstemp(dir=tempdir)
         os.close(fd)

         f = open(tempname, mode=mode, buffering=buffering, encoding=encoding,

    @@ -562,6 +563,9 @@ def save_file_atomic(filename, mode='w', *, buffering=-1, encoding=None,
     #
     # Get the disk usage of a given directory in bytes.
     #
    +# This function assumes that files do not inadvertently
    +# disappear while this function is running.
    +#
     # Arguments:
     #     (str) The path whose size to check.
     #

    @@ -682,7 +686,7 @@ def _force_rmtree(rootpath, **kwargs):

         try:
             shutil.rmtree(rootpath, **kwargs)
    -    except shutil.Error as e:
    +    except OSError as e:
             raise UtilError("Failed to remove cache directory '{}': {}"
                             .format(rootpath, e))
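
    The new tempdir parameter matters because os.rename() cannot move files
    across filesystems; staging the temporary file inside the CAS tmpdir (as
    set_ref() now does in cascache.py) keeps the final rename into the cache on
    a single filesystem. A hypothetical usage sketch against this branch's API,
    with stand-in paths:

        import os
        import tempfile
        from buildstream.utils import save_file_atomic

        cachedir = tempfile.mkdtemp()           # stand-in cache layout
        tmpdir = os.path.join(cachedir, 'tmp')
        refpath = os.path.join(cachedir, 'refs', 'my-ref')
        os.makedirs(tmpdir)
        os.makedirs(os.path.dirname(refpath))

        # Stage in tmpdir, on the same filesystem as refpath.
        with save_file_atomic(refpath, 'wb', tempdir=tmpdir) as f:
            f.write(b'serialized-digest')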
     
    

  • setup.py
    @@ -54,12 +54,13 @@ REQUIRED_BWRAP_MINOR = 1
     REQUIRED_BWRAP_PATCH = 2


    -def exit_bwrap(reason):
    +def warn_bwrap(reason):
         print(reason +
    -          "\nBuildStream requires Bubblewrap (bwrap) for"
    -          " sandboxing the build environment. Install it using your package manager"
    -          " (usually bwrap or bubblewrap)")
    -    sys.exit(1)
    +          "\nBuildStream requires Bubblewrap (bwrap {}.{}.{} or better),"
    +          " during local builds, for"
    +          " sandboxing the build environment.\nInstall it using your package manager"
    +          " (usually bwrap or bubblewrap) otherwise you will be limited to"
    +          " remote builds only.".format(REQUIRED_BWRAP_MAJOR, REQUIRED_BWRAP_MINOR, REQUIRED_BWRAP_PATCH))


     def bwrap_too_old(major, minor, patch):

    @@ -76,18 +77,19 @@ def bwrap_too_old(major, minor, patch):
             return False


    -def assert_bwrap():
    +def check_for_bwrap():
         platform = os.environ.get('BST_FORCE_BACKEND', '') or sys.platform
         if platform.startswith('linux'):
             bwrap_path = shutil.which('bwrap')
             if not bwrap_path:
    -            exit_bwrap("Bubblewrap not found")
    +            warn_bwrap("Bubblewrap not found")
    +            return

             version_bytes = subprocess.check_output([bwrap_path, "--version"]).split()[1]
             version_string = str(version_bytes, "utf-8")
             major, minor, patch = map(int, version_string.split("."))
             if bwrap_too_old(major, minor, patch):
    -            exit_bwrap("Bubblewrap too old")
    +            warn_bwrap("Bubblewrap too old")


     ###########################################

    @@ -126,7 +128,7 @@ bst_install_entry_points = {
     }

     if not os.environ.get('BST_ARTIFACTS_ONLY', ''):
    -    assert_bwrap()
    +    check_for_bwrap()
         bst_install_entry_points['console_scripts'] += [
             'bst = buildstream._frontend:cli'
         ]
    

  • tests/frontend/mirror.py
    @@ -139,6 +139,82 @@ def test_mirror_fetch(cli, tmpdir, datafiles, kind):
         result.assert_success()


    +@pytest.mark.datafiles(DATA_DIR)
    +@pytest.mark.parametrize("ref_storage", [("inline"), ("project.refs")])
    +@pytest.mark.parametrize("mirror", [("no-mirror"), ("mirror"), ("unrelated-mirror")])
    +def test_mirror_fetch_ref_storage(cli, tmpdir, datafiles, ref_storage, mirror):
    +    bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr')
    +    dev_files_path = os.path.join(str(datafiles), 'files', 'dev-files', 'usr')
    +    upstream_repodir = os.path.join(str(tmpdir), 'upstream')
    +    mirror_repodir = os.path.join(str(tmpdir), 'mirror')
    +    project_dir = os.path.join(str(tmpdir), 'project')
    +    os.makedirs(project_dir)
    +    element_dir = os.path.join(project_dir, 'elements')
    +
    +    # Create repo objects of the upstream and mirror
    +    upstream_repo = create_repo('tar', upstream_repodir)
    +    upstream_ref = upstream_repo.create(bin_files_path)
    +    mirror_repo = upstream_repo.copy(mirror_repodir)
    +    mirror_ref = upstream_ref
    +    upstream_ref = upstream_repo.create(dev_files_path)
    +
    +    element = {
    +        'kind': 'import',
    +        'sources': [
    +            upstream_repo.source_config(ref=upstream_ref if ref_storage == 'inline' else None)
    +        ]
    +    }
    +    element_name = 'test.bst'
    +    element_path = os.path.join(element_dir, element_name)
    +    full_repo = element['sources'][0]['url']
    +    upstream_map, repo_name = os.path.split(full_repo)
    +    alias = 'foo'
    +    aliased_repo = alias + ':' + repo_name
    +    element['sources'][0]['url'] = aliased_repo
    +    full_mirror = mirror_repo.source_config()['url']
    +    mirror_map, _ = os.path.split(full_mirror)
    +    os.makedirs(element_dir)
    +    _yaml.dump(element, element_path)
    +
    +    if ref_storage == 'project.refs':
    +        # Manually set project.refs to avoid caching the repo prematurely
    +        project_refs = {'projects': {
    +            'test': {
    +                element_name: [
    +                    {'ref': upstream_ref}
    +                ]
    +            }
    +        }}
    +        project_refs_path = os.path.join(project_dir, 'project.refs')
    +        _yaml.dump(project_refs, project_refs_path)
    +
    +    project = {
    +        'name': 'test',
    +        'element-path': 'elements',
    +        'aliases': {
    +            alias: upstream_map + "/"
    +        },
    +        'ref-storage': ref_storage
    +    }
    +    if mirror != 'no-mirror':
    +        mirror_data = [{
    +            'name': 'middle-earth',
    +            'aliases': {alias: [mirror_map + '/']}
    +        }]
    +        if mirror == 'unrelated-mirror':
    +            mirror_data.insert(0, {
    +                'name': 'narnia',
    +                'aliases': {'frob': ['http://www.example.com/repo']}
    +            })
    +        project['mirrors'] = mirror_data
    +
    +    project_file = os.path.join(project_dir, 'project.conf')
    +    _yaml.dump(project, project_file)
    +
    +    result = cli.run(project=project_dir, args=['fetch', element_name])
    +    result.assert_success()
    +
    +
     @pytest.mark.datafiles(DATA_DIR)
     @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
     def test_mirror_fetch_upstream_absent(cli, tmpdir, datafiles, kind):
    

  • tests/frontend/workspace.py
    @@ -43,7 +43,8 @@ DATA_DIR = os.path.join(
     )


    -def open_workspace(cli, tmpdir, datafiles, kind, track, suffix='', workspace_dir=None, project_path=None):
    +def open_workspace(cli, tmpdir, datafiles, kind, track, suffix='', workspace_dir=None,
    +                   project_path=None, element_attrs=None):
         if not workspace_dir:
             workspace_dir = os.path.join(str(tmpdir), 'workspace{}'.format(suffix))
         if not project_path:

    @@ -69,6 +70,8 @@ def open_workspace(cli, tmpdir, datafiles, kind, track, suffix='', workspace_dir
                 repo.source_config(ref=ref)
             ]
         }
    +    if element_attrs:
    +        element = {**element, **element_attrs}
         _yaml.dump(element,
                    os.path.join(element_path,
                                 element_name))

    @@ -854,3 +857,22 @@ def test_cache_key_workspace_in_dependencies(cli, tmpdir, datafiles, strict):

         # Check that the original /usr/bin/hello is not in the checkout
         assert not os.path.exists(os.path.join(checkout, 'usr', 'bin', 'hello'))
    +
    +
    +@pytest.mark.datafiles(DATA_DIR)
    +def test_multiple_failed_builds(cli, tmpdir, datafiles):
    +    element_config = {
    +        "kind": "manual",
    +        "config": {
    +            "configure-commands": [
    +                "unknown_command_that_will_fail"
    +            ]
    +        }
    +    }
    +    element_name, project, _ = open_workspace(cli, tmpdir, datafiles,
    +                                              "git", False, element_attrs=element_config)
    +
    +    for _ in range(2):
    +        result = cli.run(project=project, args=["build", element_name])
    +        assert "BUG" not in result.stderr
    +        assert cli.get_element_state(project, element_name) != "cached"


