[Notes] [Git][BuildGrid/buildgrid][mablanch/132-gather-state-metrics] 19 commits: buildgrid/utils.py: New `get_hash_type` function.



Title: GitLab

Martin Blanchard pushed to branch mablanch/132-gather-state-metrics at BuildGrid / buildgrid

Commits:

21 changed files:

Changes:

  • .pylintrc
    ... ... @@ -185,6 +185,7 @@ ignore-on-opaque-inference=yes
    185 185
     # for classes with dynamically set attributes). This supports the use of
    
    186 186
     # qualified names.
    
    187 187
     ignored-classes=google.protobuf.any_pb2.Any,
    
    188
    +                google.protobuf.duration_pb2.Duration,
    
    188 189
                     google.protobuf.timestamp_pb2.Timestamp
    
    189 190
     
    
    190 191
     # List of module names for which member attributes should not be checked
    
    ... ... @@ -460,6 +461,7 @@ known-third-party=boto3,
    460 461
                       enchant,
    
    461 462
                       google,
    
    462 463
                       grpc,
    
    464
    +                  janus,
    
    463 465
                       moto,
    
    464 466
                       yaml
    
    465 467
     
    

  • buildgrid/_app/commands/cmd_capabilities.py
    1
    +# Copyright (C) 2018 Bloomberg LP
    
    2
    +#
    
    3
    +# Licensed under the Apache License, Version 2.0 (the "License");
    
    4
    +# you may not use this file except in compliance with the License.
    
    5
    +# You may obtain a copy of the License at
    
    6
    +#
    
    7
    +#  <http://www.apache.org/licenses/LICENSE-2.0>
    
    8
    +#
    
    9
    +# Unless required by applicable law or agreed to in writing, software
    
    10
    +# distributed under the License is distributed on an "AS IS" BASIS,
    
    11
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    
    12
    +# See the License for the specific language governing permissions and
    
    13
    +# limitations under the License.
    
    14
    +
    
    15
    +
    
    16
    +import sys
    
    17
    +from urllib.parse import urlparse
    
    18
    +
    
    19
    +import click
    
    20
    +import grpc
    
    21
    +
    
    22
    +from buildgrid.client.capabilities import CapabilitiesInterface
    
    23
    +
    
    24
    +from ..cli import pass_context
    
    25
    +
    
    26
    +
    
    27
    +@click.command(name='capabilities', short_help="Capabilities service.")
    
    28
    +@click.option('--remote', type=click.STRING, default='http://localhost:50051', show_default=True,
    
    29
    +              help="Remote execution server's URL (port defaults to 50051 if not specified).")
    
    30
    +@click.option('--client-key', type=click.Path(exists=True, dir_okay=False), default=None,
    
    31
    +              help="Private client key for TLS (PEM-encoded)")
    
    32
    +@click.option('--client-cert', type=click.Path(exists=True, dir_okay=False), default=None,
    
    33
    +              help="Public client certificate for TLS (PEM-encoded)")
    
    34
    +@click.option('--server-cert', type=click.Path(exists=True, dir_okay=False), default=None,
    
    35
    +              help="Public server certificate for TLS (PEM-encoded)")
    
    36
    +@click.option('--instance-name', type=click.STRING, default='main', show_default=True,
    
    37
    +              help="Targeted farm instance name.")
    
    38
    +@pass_context
    
    39
    +def cli(context, remote, instance_name, client_key, client_cert, server_cert):
    
    40
    +    click.echo("Getting capabilities...")
    
    41
    +    url = urlparse(remote)
    
    42
    +
    
    43
    +    remote = '{}:{}'.format(url.hostname, url.port or 50051)
    
    44
    +    instance_name = instance_name
    
    45
    +
    
    46
    +    if url.scheme == 'http':
    
    47
    +        channel = grpc.insecure_channel(remote)
    
    48
    +    else:
    
    49
    +        credentials = context.load_client_credentials(client_key, client_cert, server_cert)
    
    50
    +        if not credentials:
    
    51
    +            click.echo("ERROR: no TLS keys were specified and no defaults could be found.", err=True)
    
    52
    +            sys.exit(-1)
    
    53
    +
    
    54
    +        channel = grpc.secure_channel(remote, credentials)
    
    55
    +
    
    56
    +    interface = CapabilitiesInterface(channel)
    
    57
    +    response = interface.get_capabilities(instance_name)
    
    58
    +    click.echo(response)

  • buildgrid/client/capabilities.py
    1
    +# Copyright (C) 2018 Bloomberg LP
    
    2
    +#
    
    3
    +# Licensed under the Apache License, Version 2.0 (the "License");
    
    4
    +# you may not use this file except in compliance with the License.
    
    5
    +# You may obtain a copy of the License at
    
    6
    +#
    
    7
    +#  <http://www.apache.org/licenses/LICENSE-2.0>
    
    8
    +#
    
    9
    +# Unless required by applicable law or agreed to in writing, software
    
    10
    +# distributed under the License is distributed on an "AS IS" BASIS,
    
    11
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    
    12
    +# See the License for the specific language governing permissions and
    
    13
    +# limitations under the License.
    
    14
    +
    
    15
    +
    
    16
    +import logging
    
    17
    +import grpc
    
    18
    +
    
    19
    +from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
    
    20
    +
    
    21
    +
    
    22
    +class CapabilitiesInterface:
    
    23
    +    """Interface for calls to the Capabilities Service."""
    
    24
    +
    
    25
    +    def __init__(self, channel):
    
    26
    +        """Initialises an instance of the capabilities service.
    
    27
    +
    
    28
    +        Args:
    
    29
    +            channel (grpc.Channel): A gRPC channel to the CAS endpoint.
    
    30
    +        """
    
    31
    +        self.__logger = logging.getLogger(__name__)
    
    32
    +        self.__stub = remote_execution_pb2_grpc.CapabilitiesStub(channel)
    
    33
    +
    
    34
    +    def get_capabilities(self, instance_name):
    
    35
    +        """Returns the capabilities of the server to the user.
    
    36
    +
    
    37
    +        Args:
    
    38
    +            instance_name (str): The name of the instance."""
    
    39
    +
    
    40
    +        request = remote_execution_pb2.GetCapabilitiesRequest(instance_name=instance_name)
    
    41
    +        try:
    
    42
    +            return self.__stub.GetCapabilities(request)
    
    43
    +
    
    44
    +        except grpc.RpcError as e:
    
    45
    +            self.__logger.error(e)
    
    46
    +            raise

  • buildgrid/server/bots/instance.py
    ... ... @@ -37,6 +37,10 @@ class BotsInterface:
    37 37
             self._assigned_leases = {}
    
    38 38
             self._scheduler = scheduler
    
    39 39
     
    
    40
    +    @property
    
    41
    +    def scheduler(self):
    
    42
    +        return self._scheduler
    
    43
    +
    
    40 44
         def register_instance_with_server(self, instance_name, server):
    
    41 45
             server.add_bots_interface(self, instance_name)
    
    42 46
     
    

  • buildgrid/server/bots/service.py
    ... ... @@ -23,8 +23,9 @@ import logging
    23 23
     
    
    24 24
     import grpc
    
    25 25
     
    
    26
    -from google.protobuf.empty_pb2 import Empty
    
    26
    +from google.protobuf import empty_pb2, timestamp_pb2
    
    27 27
     
    
    28
    +from buildgrid._enums import BotStatus
    
    28 29
     from buildgrid._exceptions import InvalidArgumentError, OutOfSyncError
    
    29 30
     from buildgrid._protos.google.devtools.remoteworkers.v1test2 import bots_pb2
    
    30 31
     from buildgrid._protos.google.devtools.remoteworkers.v1test2 import bots_pb2_grpc
    
    ... ... @@ -32,24 +33,86 @@ from buildgrid._protos.google.devtools.remoteworkers.v1test2 import bots_pb2_grp
    32 33
     
    
    33 34
     class BotsService(bots_pb2_grpc.BotsServicer):
    
    34 35
     
    
    35
    -    def __init__(self, server):
    
    36
    +    def __init__(self, server, monitor=False):
    
    36 37
             self.__logger = logging.getLogger(__name__)
    
    37 38
     
    
    39
    +        self.__bots_by_status = None
    
    40
    +        self.__bots_by_instance = None
    
    41
    +        self.__bots = None
    
    42
    +
    
    38 43
             self._instances = {}
    
    39 44
     
    
    40 45
             bots_pb2_grpc.add_BotsServicer_to_server(self, server)
    
    41 46
     
    
    42
    -    def add_instance(self, name, instance):
    
    43
    -        self._instances[name] = instance
    
    47
    +        self._is_instrumented = monitor
    
    48
    +
    
    49
    +        if self._is_instrumented:
    
    50
    +            self.__bots_by_status = {}
    
    51
    +            self.__bots_by_instance = {}
    
    52
    +            self.__bots = {}
    
    53
    +
    
    54
    +            self.__bots_by_status[BotStatus.OK] = set()
    
    55
    +            self.__bots_by_status[BotStatus.UNHEALTHY] = set()
    
    56
    +
    
    57
    +    # --- Public API ---
    
    58
    +
    
    59
    +    def add_instance(self, instance_name, instance):
    
    60
    +        """Registers a new servicer instance.
    
    61
    +
    
    62
    +        Args:
    
    63
    +            instance_name (str): The new instance's name.
    
    64
    +            instance (BotsInterface): The new instance itself.
    
    65
    +        """
    
    66
    +        self._instances[instance_name] = instance
    
    67
    +
    
    68
    +        if self._is_instrumented:
    
    69
    +            self.__bots_by_instance[instance_name] = set()
    
    70
    +
    
    71
    +    def get_scheduler(self, instance_name):
    
    72
    +        """Retrieves a reference to the scheduler for an instance.
    
    73
    +
    
    74
    +        Args:
    
    75
    +            instance_name (str): The name of the instance to query.
    
    76
    +
    
    77
    +        Returns:
    
    78
    +            Scheduler: A reference to the scheduler for `instance_name`.
    
    79
    +
    
    80
    +        Raises:
    
    81
    +            InvalidArgumentError: If no instance named `instance_name` exists.
    
    82
    +        """
    
    83
    +        instance = self._get_instance(instance_name)
    
    84
    +
    
    85
    +        return instance.scheduler
    
    86
    +
    
    87
    +    # --- Public API: Servicer ---
    
    44 88
     
    
    45 89
         def CreateBotSession(self, request, context):
    
    90
    +        """Handles CreateBotSessionRequest messages.
    
    91
    +
    
    92
    +        Args:
    
    93
    +            request (CreateBotSessionRequest): The incoming RPC request.
    
    94
    +            context (grpc.ServicerContext): Context for the RPC call.
    
    95
    +        """
    
    46 96
             self.__logger.debug("CreateBotSession request from [%s]", context.peer())
    
    47 97
     
    
    98
    +        instance_name = request.parent
    
    99
    +        bot_status = BotStatus(request.bot_session.status)
    
    100
    +        bot_id = request.bot_session.bot_id
    
    101
    +
    
    48 102
             try:
    
    49
    -            parent = request.parent
    
    50
    -            instance = self._get_instance(request.parent)
    
    51
    -            return instance.create_bot_session(parent,
    
    52
    -                                               request.bot_session)
    
    103
    +            instance = self._get_instance(instance_name)
    
    104
    +            bot_session = instance.create_bot_session(instance_name,
    
    105
    +                                                      request.bot_session)
    
    106
    +            now = timestamp_pb2.Timestamp()
    
    107
    +            now.GetCurrentTime()
    
    108
    +
    
    109
    +            if self._is_instrumented:
    
    110
    +                self.__bots[bot_id] = now
    
    111
    +                self.__bots_by_instance[instance_name].add(bot_id)
    
    112
    +                if bot_status in self.__bots_by_status:
    
    113
    +                    self.__bots_by_status[bot_status].add(bot_id)
    
    114
    +
    
    115
    +            return bot_session
    
    53 116
     
    
    54 117
             except InvalidArgumentError as e:
    
    55 118
                 self.__logger.error(e)
    
    ... ... @@ -59,17 +122,41 @@ class BotsService(bots_pb2_grpc.BotsServicer):
    59 122
             return bots_pb2.BotSession()
    
    60 123
     
    
    61 124
         def UpdateBotSession(self, request, context):
    
    125
    +        """Handles UpdateBotSessionRequest messages.
    
    126
    +
    
    127
    +        Args:
    
    128
    +            request (UpdateBotSessionRequest): The incoming RPC request.
    
    129
    +            context (grpc.ServicerContext): Context for the RPC call.
    
    130
    +        """
    
    62 131
             self.__logger.debug("UpdateBotSession request from [%s]", context.peer())
    
    63 132
     
    
    133
    +        names = request.name.split("/")
    
    134
    +        bot_status = BotStatus(request.bot_session.status)
    
    135
    +        bot_id = request.bot_session.bot_id
    
    136
    +
    
    64 137
             try:
    
    65
    -            names = request.name.split("/")
    
    66
    -            # Operation name should be in format:
    
    67
    -            # {instance/name}/{uuid}
    
    68
    -            instance_name = ''.join(names[0:-1])
    
    138
    +            instance_name = '/'.join(names[:-1])
    
    69 139
     
    
    70 140
                 instance = self._get_instance(instance_name)
    
    71
    -            return instance.update_bot_session(request.name,
    
    72
    -                                               request.bot_session)
    
    141
    +            bot_session = instance.update_bot_session(request.name,
    
    142
    +                                                      request.bot_session)
    
    143
    +
    
    144
    +            if self._is_instrumented:
    
    145
    +                self.__bots[bot_id].GetCurrentTime()
    
    146
    +                if bot_id not in self.__bots_by_status[bot_status]:
    
    147
    +                    if bot_status == BotStatus.OK:
    
    148
    +                        self.__bots_by_status[BotStatus.OK].add(bot_id)
    
    149
    +                        self.__bots_by_status[BotStatus.UNHEALTHY].discard(bot_id)
    
    150
    +
    
    151
    +                    elif bot_status == BotStatus.UNHEALTHY:
    
    152
    +                        self.__bots_by_status[BotStatus.OK].discard(bot_id)
    
    153
    +                        self.__bots_by_status[BotStatus.UNHEALTHY].add(bot_id)
    
    154
    +
    
    155
    +                    else:
    
    156
    +                        self.__bots_by_instance[instance_name].remove(bot_id)
    
    157
    +                        del self.__bots[bot_id]
    
    158
    +
    
    159
    +            return bot_session
    
    73 160
     
    
    74 161
             except InvalidArgumentError as e:
    
    75 162
                 self.__logger.error(e)
    
    ... ... @@ -89,10 +176,47 @@ class BotsService(bots_pb2_grpc.BotsServicer):
    89 176
             return bots_pb2.BotSession()
    
    90 177
     
    
    91 178
         def PostBotEventTemp(self, request, context):
    
    179
    +        """Handles PostBotEventTempRequest messages.
    
    180
    +
    
    181
    +        Args:
    
    182
    +            request (PostBotEventTempRequest): The incoming RPC request.
    
    183
    +            context (grpc.ServicerContext): Context for the RPC call.
    
    184
    +        """
    
    92 185
             self.__logger.debug("PostBotEventTemp request from [%s]", context.peer())
    
    93 186
     
    
    94 187
             context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    
    95
    -        return Empty()
    
    188
    +
    
    189
    +        return empty_pb2.Empty()
    
    190
    +
    
    191
    +    # --- Public API: Monitoring ---
    
    192
    +
    
    193
    +    @property
    
    194
    +    def is_instrumented(self):
    
    195
    +        return self._is_instrumented
    
    196
    +
    
    197
    +    def query_n_bots(self):
    
    198
    +        if self.__bots is not None:
    
    199
    +            return len(self.__bots)
    
    200
    +
    
    201
    +        return 0
    
    202
    +
    
    203
    +    def query_n_bots_for_instance(self, instance_name):
    
    204
    +        try:
    
    205
    +            if self.__bots_by_instance is not None:
    
    206
    +                return len(self.__bots_by_instance[instance_name])
    
    207
    +        except KeyError:
    
    208
    +            pass
    
    209
    +        return 0
    
    210
    +
    
    211
    +    def query_n_bots_for_status(self, bot_status):
    
    212
    +        try:
    
    213
    +            if self.__bots_by_status is not None:
    
    214
    +                return len(self.__bots_by_status[bot_status])
    
    215
    +        except KeyError:
    
    216
    +            pass
    
    217
    +        return 0
    
    218
    +
    
    219
    +    # --- Private API ---
    
    96 220
     
    
    97 221
         def _get_instance(self, name):
    
    98 222
             try:
    

  • buildgrid/server/capabilities/__init__.py

  • buildgrid/server/capabilities/instance.py
    1
    +# Copyright (C) 2018 Bloomberg LP
    
    2
    +#
    
    3
    +# Licensed under the Apache License, Version 2.0 (the "License");
    
    4
    +# you may not use this file except in compliance with the License.
    
    5
    +# You may obtain a copy of the License at
    
    6
    +#
    
    7
    +#  <http://www.apache.org/licenses/LICENSE-2.0>
    
    8
    +#
    
    9
    +# Unless required by applicable law or agreed to in writing, software
    
    10
    +# distributed under the License is distributed on an "AS IS" BASIS,
    
    11
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    
    12
    +# See the License for the specific language governing permissions and
    
    13
    +# limitations under the License.
    
    14
    +
    
    15
    +
    
    16
    +import logging
    
    17
    +
    
    18
    +from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
    
    19
    +
    
    20
    +
    
    21
    +class CapabilitiesInstance:
    
    22
    +
    
    23
    +    def __init__(self, cas_instance=None, action_cache_instance=None, execution_instance=None):
    
    24
    +        self.__logger = logging.getLogger(__name__)
    
    25
    +        self.__cas_instance = cas_instance
    
    26
    +        self.__action_cache_instance = action_cache_instance
    
    27
    +        self.__execution_instance = execution_instance
    
    28
    +
    
    29
    +    def register_instance_with_server(self, instance_name, server):
    
    30
    +        server.add_capabilities_instance(self, instance_name)
    
    31
    +
    
    32
    +    def add_cas_instance(self, cas_instance):
    
    33
    +        self.__cas_instance = cas_instance
    
    34
    +
    
    35
    +    def add_action_cache_instance(self, action_cache_instance):
    
    36
    +        self.__action_cache_instance = action_cache_instance
    
    37
    +
    
    38
    +    def add_execution_instance(self, execution_instance):
    
    39
    +        self.__execution_instance = execution_instance
    
    40
    +
    
    41
    +    def get_capabilities(self):
    
    42
    +        server_capabilities = remote_execution_pb2.ServerCapabilities()
    
    43
    +        server_capabilities.cache_capabilities.CopyFrom(self._get_cache_capabilities())
    
    44
    +        server_capabilities.execution_capabilities.CopyFrom(self._get_capabilities_execution())
    
    45
    +        # TODO
    
    46
    +        # When API is stable, fill out SemVer values
    
    47
    +        # server_capabilities.deprecated_api_version =
    
    48
    +        # server_capabilities.low_api_version =
    
    49
    +        # server_capabilities.low_api_version =
    
    50
    +        # server_capabilities.high_api_version =
    
    51
    +        return server_capabilities
    
    52
    +
    
    53
    +    def _get_cache_capabilities(self):
    
    54
    +        capabilities = remote_execution_pb2.CacheCapabilities()
    
    55
    +        action_cache_update_capabilities = remote_execution_pb2.ActionCacheUpdateCapabilities()
    
    56
    +
    
    57
    +        if self.__cas_instance:
    
    58
    +            capabilities.digest_function.extend([self.__cas_instance.hash_type()])
    
    59
    +            capabilities.max_batch_total_size_bytes = self.__cas_instance.max_batch_total_size_bytes()
    
    60
    +            capabilities.symlink_absolute_path_strategy = self.__cas_instance.symlink_absolute_path_strategy()
    
    61
    +            # TODO: execution priority #102
    
    62
    +            # capabilities.cache_priority_capabilities =
    
    63
    +
    
    64
    +        if self.__action_cache_instance:
    
    65
    +            action_cache_update_capabilities.update_enabled = self.__action_cache_instance.allow_updates
    
    66
    +
    
    67
    +        capabilities.action_cache_update_capabilities.CopyFrom(action_cache_update_capabilities)
    
    68
    +        return capabilities
    
    69
    +
    
    70
    +    def _get_capabilities_execution(self):
    
    71
    +        capabilities = remote_execution_pb2.ExecutionCapabilities()
    
    72
    +        if self.__execution_instance:
    
    73
    +            capabilities.exec_enabled = True
    
    74
    +            capabilities.digest_function = self.__execution_instance.hash_type()
    
    75
    +            # TODO: execution priority #102
    
    76
    +            # capabilities.execution_priority =
    
    77
    +
    
    78
    +        else:
    
    79
    +            capabilities.exec_enabled = False
    
    80
    +
    
    81
    +        return capabilities

  • buildgrid/server/capabilities/service.py
    1
    +# Copyright (C) 2018 Bloomberg LP
    
    2
    +#
    
    3
    +# Licensed under the Apache License, Version 2.0 (the "License");
    
    4
    +# you may not use this file except in compliance with the License.
    
    5
    +# You may obtain a copy of the License at
    
    6
    +#
    
    7
    +#  <http://www.apache.org/licenses/LICENSE-2.0>
    
    8
    +#
    
    9
    +# Unless required by applicable law or agreed to in writing, software
    
    10
    +# distributed under the License is distributed on an "AS IS" BASIS,
    
    11
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    
    12
    +# See the License for the specific language governing permissions and
    
    13
    +# limitations under the License.
    
    14
    +
    
    15
    +
    
    16
    +import logging
    
    17
    +
    
    18
    +import grpc
    
    19
    +
    
    20
    +from buildgrid._exceptions import InvalidArgumentError
    
    21
    +from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
    
    22
    +
    
    23
    +
    
    24
    +class CapabilitiesService(remote_execution_pb2_grpc.CapabilitiesServicer):
    
    25
    +
    
    26
    +    def __init__(self, server):
    
    27
    +        self.__logger = logging.getLogger(__name__)
    
    28
    +        self.__instances = {}
    
    29
    +        remote_execution_pb2_grpc.add_CapabilitiesServicer_to_server(self, server)
    
    30
    +
    
    31
    +    def add_instance(self, name, instance):
    
    32
    +        self.__instances[name] = instance
    
    33
    +
    
    34
    +    def add_cas_instance(self, name, instance):
    
    35
    +        self.__instances[name].add_cas_instance(instance)
    
    36
    +
    
    37
    +    def add_action_cache_instance(self, name, instance):
    
    38
    +        self.__instances[name].add_action_cache_instance(instance)
    
    39
    +
    
    40
    +    def add_execution_instance(self, name, instance):
    
    41
    +        self.__instances[name].add_execution_instance(instance)
    
    42
    +
    
    43
    +    def GetCapabilities(self, request, context):
    
    44
    +        try:
    
    45
    +            instance = self._get_instance(request.instance_name)
    
    46
    +            return instance.get_capabilities()
    
    47
    +
    
    48
    +        except InvalidArgumentError as e:
    
    49
    +            self.__logger.error(e)
    
    50
    +            context.set_details(str(e))
    
    51
    +            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
    
    52
    +
    
    53
    +        return remote_execution_pb2.ServerCapabilities()
    
    54
    +
    
    55
    +    def _get_instance(self, name):
    
    56
    +        try:
    
    57
    +            return self.__instances[name]
    
    58
    +
    
    59
    +        except KeyError:
    
    60
    +            raise InvalidArgumentError("Instance doesn't exist on server: [{}]".format(name))

  • buildgrid/server/cas/instance.py
    ... ... @@ -25,6 +25,7 @@ from buildgrid._exceptions import InvalidArgumentError, NotFoundError, OutOfRang
    25 25
     from buildgrid._protos.google.bytestream import bytestream_pb2
    
    26 26
     from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 as re_pb2
    
    27 27
     from buildgrid.settings import HASH, HASH_LENGTH
    
    28
    +from buildgrid.utils import get_hash_type
    
    28 29
     
    
    29 30
     
    
    30 31
     class ContentAddressableStorageInstance:
    
    ... ... @@ -37,6 +38,19 @@ class ContentAddressableStorageInstance:
    37 38
         def register_instance_with_server(self, instance_name, server):
    
    38 39
             server.add_cas_instance(self, instance_name)
    
    39 40
     
    
    41
    +    def hash_type(self):
    
    42
    +        return get_hash_type()
    
    43
    +
    
    44
    +    def max_batch_total_size_bytes(self):
    
    45
    +        # TODO: link with max size
    
    46
    +        # Should be added from settings in MR !119
    
    47
    +        return 2000000
    
    48
    +
    
    49
    +    def symlink_absolute_path_strategy(self):
    
    50
    +        # Currently this strategy is hardcoded into BuildGrid
    
    51
    +        # With no setting to reference
    
    52
    +        return re_pb2.CacheCapabilities().DISALLOWED
    
    53
    +
    
    40 54
         def find_missing_blobs(self, blob_digests):
    
    41 55
             storage = self._storage
    
    42 56
             return re_pb2.FindMissingBlobsResponse(
    

  • buildgrid/server/execution/instance.py
    ... ... @@ -25,6 +25,7 @@ from buildgrid._exceptions import FailedPreconditionError, InvalidArgumentError
    25 25
     from buildgrid._protos.build.bazel.remote.execution.v2.remote_execution_pb2 import Action
    
    26 26
     
    
    27 27
     from ..job import Job
    
    28
    +from ...utils import get_hash_type
    
    28 29
     
    
    29 30
     
    
    30 31
     class ExecutionInstance:
    
    ... ... @@ -35,9 +36,16 @@ class ExecutionInstance:
    35 36
             self._storage = storage
    
    36 37
             self._scheduler = scheduler
    
    37 38
     
    
    39
    +    @property
    
    40
    +    def scheduler(self):
    
    41
    +        return self._scheduler
    
    42
    +
    
    38 43
         def register_instance_with_server(self, instance_name, server):
    
    39 44
             server.add_execution_instance(self, instance_name)
    
    40 45
     
    
    46
    +    def hash_type(self):
    
    47
    +        return get_hash_type()
    
    48
    +
    
    41 49
         def execute(self, action_digest, skip_cache_lookup, message_queue=None):
    
    42 50
             """ Sends a job for execution.
    
    43 51
             Queues an action and creates an Operation instance to be associated with
    

  • buildgrid/server/execution/service.py
    ... ... @@ -33,30 +33,84 @@ from buildgrid._protos.google.longrunning import operations_pb2
    33 33
     
    
    34 34
     class ExecutionService(remote_execution_pb2_grpc.ExecutionServicer):
    
    35 35
     
    
    36
    -    def __init__(self, server):
    
    36
    +    def __init__(self, server, monitor=False):
    
    37 37
             self.__logger = logging.getLogger(__name__)
    
    38 38
     
    
    39
    +        self.__peers_by_instance = None
    
    40
    +        self.__peers = None
    
    41
    +
    
    39 42
             self._instances = {}
    
    43
    +
    
    40 44
             remote_execution_pb2_grpc.add_ExecutionServicer_to_server(self, server)
    
    41 45
     
    
    42
    -    def add_instance(self, name, instance):
    
    43
    -        self._instances[name] = instance
    
    46
    +        self._is_instrumented = monitor
    
    47
    +
    
    48
    +        if self._is_instrumented:
    
    49
    +            self.__peers_by_instance = {}
    
    50
    +            self.__peers = {}
    
    51
    +
    
    52
    +    # --- Public API ---
    
    53
    +
    
    54
    +    def add_instance(self, instance_name, instance):
    
    55
    +        """Registers a new servicer instance.
    
    56
    +
    
    57
    +        Args:
    
    58
    +            instance_name (str): The new instance's name.
    
    59
    +            instance (ExecutionInstance): The new instance itself.
    
    60
    +        """
    
    61
    +        self._instances[instance_name] = instance
    
    62
    +
    
    63
    +        if self._is_instrumented:
    
    64
    +            self.__peers_by_instance[instance_name] = set()
    
    65
    +
    
    66
    +    def get_scheduler(self, instance_name):
    
    67
    +        """Retrieves a reference to the scheduler for an instance.
    
    68
    +
    
    69
    +        Args:
    
    70
    +            instance_name (str): The name of the instance to query.
    
    71
    +
    
    72
    +        Returns:
    
    73
    +            Scheduler: A reference to the scheduler for `instance_name`.
    
    74
    +
    
    75
    +        Raises:
    
    76
    +            InvalidArgumentError: If no instance named `instance_name` exists.
    
    77
    +        """
    
    78
    +        instance = self._get_instance(instance_name)
    
    79
    +
    
    80
    +        return instance.scheduler
    
    81
    +
    
    82
    +    # --- Public API: Servicer ---
    
    44 83
     
    
    45 84
         def Execute(self, request, context):
    
    85
    +        """Handles ExecuteRequest messages.
    
    86
    +
    
    87
    +        Args:
    
    88
    +            request (ExecuteRequest): The incoming RPC request.
    
    89
    +            context (grpc.ServicerContext): Context for the RPC call.
    
    90
    +        """
    
    46 91
             self.__logger.debug("Execute request from [%s]", context.peer())
    
    47 92
     
    
    93
    +        instance_name = request.instance_name
    
    94
    +        message_queue = queue.Queue()
    
    95
    +        peer = context.peer()
    
    96
    +
    
    48 97
             try:
    
    49
    -            message_queue = queue.Queue()
    
    50
    -            instance = self._get_instance(request.instance_name)
    
    98
    +            instance = self._get_instance(instance_name)
    
    51 99
                 operation = instance.execute(request.action_digest,
    
    52 100
                                              request.skip_cache_lookup,
    
    53 101
                                              message_queue)
    
    54 102
     
    
    55
    -            context.add_callback(partial(instance.unregister_message_client,
    
    56
    -                                         operation.name, message_queue))
    
    103
    +            context.add_callback(partial(self._rpc_termination_callback,
    
    104
    +                                         peer, instance_name, operation.name, message_queue))
    
    57 105
     
    
    58
    -            instanced_op_name = "{}/{}".format(request.instance_name,
    
    59
    -                                               operation.name)
    
    106
    +            if self._is_instrumented:
    
    107
    +                if peer not in self.__peers:
    
    108
    +                    self.__peers_by_instance[instance_name].add(peer)
    
    109
    +                    self.__peers[peer] = 1
    
    110
    +                else:
    
    111
    +                    self.__peers[peer] += 1
    
    112
    +
    
    113
    +            instanced_op_name = "{}/{}".format(instance_name, operation.name)
    
    60 114
     
    
    61 115
                 self.__logger.info("Operation name: [%s]", instanced_op_name)
    
    62 116
     
    
    ... ... @@ -86,23 +140,33 @@ class ExecutionService(remote_execution_pb2_grpc.ExecutionServicer):
    86 140
                 yield operations_pb2.Operation()
    
    87 141
     
    
    88 142
         def WaitExecution(self, request, context):
    
    89
    -        self.__logger.debug("WaitExecution request from [%s]", context.peer())
    
    143
    +        """Handles WaitExecutionRequest messages.
    
    90 144
     
    
    91
    -        try:
    
    92
    -            names = request.name.split("/")
    
    145
    +        Args:
    
    146
    +            request (WaitExecutionRequest): The incoming RPC request.
    
    147
    +            context (grpc.ServicerContext): Context for the RPC call.
    
    148
    +        """
    
    149
    +        self.__logger.debug("WaitExecution request from [%s]", context.peer())
    
    93 150
     
    
    94
    -            # Operation name should be in format:
    
    95
    -            # {instance/name}/{operation_id}
    
    96
    -            instance_name = ''.join(names[0:-1])
    
    151
    +        names = request.name.split('/')
    
    152
    +        instance_name = '/'.join(names[:-1])
    
    153
    +        operation_name = names[-1]
    
    154
    +        message_queue = queue.Queue()
    
    155
    +        peer = context.peer()
    
    97 156
     
    
    98
    -            message_queue = queue.Queue()
    
    99
    -            operation_name = names[-1]
    
    157
    +        try:
    
    100 158
                 instance = self._get_instance(instance_name)
    
    101 159
     
    
    102 160
                 instance.register_message_client(operation_name, message_queue)
    
    161
    +            context.add_callback(partial(self._rpc_termination_callback,
    
    162
    +                                         peer, instance_name, operation_name, message_queue))
    
    103 163
     
    
    104
    -            context.add_callback(partial(instance.unregister_message_client,
    
    105
    -                                         operation_name, message_queue))
    
    164
    +            if self._is_instrumented:
    
    165
    +                if peer not in self.__peers:
    
    166
    +                    self.__peers_by_instance[instance_name].add(peer)
    
    167
    +                    self.__peers[peer] = 1
    
    168
    +                else:
    
    169
    +                    self.__peers[peer] += 1
    
    106 170
     
    
    107 171
                 for operation in instance.stream_operation_updates(message_queue,
    
    108 172
                                                                    operation_name):
    
    ... ... @@ -123,6 +187,39 @@ class ExecutionService(remote_execution_pb2_grpc.ExecutionServicer):
    123 187
                 context.set_code(grpc.StatusCode.CANCELLED)
    
    124 188
                 yield operations_pb2.Operation()
    
    125 189
     
    
    190
    +    # --- Public API: Monitoring ---
    
    191
    +
    
    192
    +    @property
    
    193
    +    def is_instrumented(self):
    
    194
    +        return self._is_instrumented
    
    195
    +
    
    196
    +    def query_n_clients(self):
    
    197
    +        if self.__peers is not None:
    
    198
    +            return len(self.__peers)
    
    199
    +        return 0
    
    200
    +
    
    201
    +    def query_n_clients_for_instance(self, instance_name):
    
    202
    +        try:
    
    203
    +            if self.__peers_by_instance is not None:
    
    204
    +                return len(self.__peers_by_instance[instance_name])
    
    205
    +        except KeyError:
    
    206
    +            pass
    
    207
    +        return 0
    
    208
    +
    
    209
    +    # --- Private API ---
    
    210
    +
    
    211
    +    def _rpc_termination_callback(self, peer, instance_name, job_name, message_queue):
    
    212
    +        instance = self._get_instance(instance_name)
    
    213
    +
    
    214
    +        instance.unregister_message_client(job_name, message_queue)
    
    215
    +
    
    216
    +        if self._is_instrumented:
    
    217
    +            if self.__peers[peer] > 1:
    
    218
    +                self.__peers[peer] -= 1
    
    219
    +            else:
    
    220
    +                self.__peers_by_instance[instance_name].remove(peer)
    
    221
    +                del self.__peers[peer]
    
    222
    +
    
    126 223
         def _get_instance(self, name):
    
    127 224
             try:
    
    128 225
                 return self._instances[name]
    

  • buildgrid/server/instance.py
    ... ... @@ -15,12 +15,16 @@
    15 15
     
    
    16 16
     import asyncio
    
    17 17
     from concurrent import futures
    
    18
    +from datetime import timedelta
    
    18 19
     import logging
    
    19 20
     import os
    
    20 21
     import signal
    
    22
    +import time
    
    21 23
     
    
    22 24
     import grpc
    
    23 25
     
    
    26
    +from buildgrid._enums import BotStatus, MetricRecordDomain, MetricRecordType
    
    27
    +from buildgrid._protos.buildgrid.v2 import monitoring_pb2
    
    24 28
     from buildgrid.server.actioncache.service import ActionCacheService
    
    25 29
     from buildgrid.server.bots.service import BotsService
    
    26 30
     from buildgrid.server.cas.service import ByteStreamService, ContentAddressableStorageService
    
    ... ... @@ -28,6 +32,9 @@ from buildgrid.server.execution.service import ExecutionService
    28 32
     from buildgrid.server._monitoring import MonitoringBus, MonitoringOutputType, MonitoringOutputFormat
    
    29 33
     from buildgrid.server.operations.service import OperationsService
    
    30 34
     from buildgrid.server.referencestorage.service import ReferenceStorageService
    
    35
    +from buildgrid.server.capabilities.instance import CapabilitiesInstance
    
    36
    +from buildgrid.server.capabilities.service import CapabilitiesService
    
    37
    +from buildgrid.settings import MONITORING_PERIOD
    
    31 38
     
    
    32 39
     
    
    33 40
     class BuildGridServer:
    
    ... ... @@ -55,6 +62,11 @@ class BuildGridServer:
    55 62
             self.__main_loop = asyncio.get_event_loop()
    
    56 63
             self.__monitoring_bus = None
    
    57 64
     
    
    65
    +        self.__state_monitoring_task = None
    
    66
    +
    
    67
    +        # We always want a capabilities service
    
    68
    +        self._capabilities_service = CapabilitiesService(self.__grpc_server)
    
    69
    +
    
    58 70
             self._execution_service = None
    
    59 71
             self._bots_service = None
    
    60 72
             self._operations_service = None
    
    ... ... @@ -63,6 +75,9 @@ class BuildGridServer:
    63 75
             self._cas_service = None
    
    64 76
             self._bytestream_service = None
    
    65 77
     
    
    78
    +        self._schedulers = {}
    
    79
    +        self._instances = set()
    
    80
    +
    
    66 81
             self._is_instrumented = monitor
    
    67 82
     
    
    68 83
             if self._is_instrumented:
    
    ... ... @@ -79,6 +94,10 @@ class BuildGridServer:
    79 94
             if self._is_instrumented:
    
    80 95
                 self.__monitoring_bus.start()
    
    81 96
     
    
    97
    +            self.__state_monitoring_task = asyncio.ensure_future(
    
    98
    +                self._state_monitoring_worker(period=MONITORING_PERIOD),
    
    99
    +                loop=self.__main_loop)
    
    100
    +
    
    82 101
             self.__main_loop.add_signal_handler(signal.SIGTERM, self.stop)
    
    83 102
     
    
    84 103
             self.__main_loop.run_forever()
    
    ... ... @@ -86,6 +105,9 @@ class BuildGridServer:
    86 105
         def stop(self):
    
    87 106
             """Stops the BuildGrid server."""
    
    88 107
             if self._is_instrumented:
    
    108
    +            if self.__state_monitoring_task is not None:
    
    109
    +                self.__state_monitoring_task.cancel()
    
    110
    +
    
    89 111
                 self.__monitoring_bus.stop()
    
    90 112
     
    
    91 113
             self.__main_loop.stop()
    
    ... ... @@ -125,9 +147,14 @@ class BuildGridServer:
    125 147
                 instance_name (str): Instance name.
    
    126 148
             """
    
    127 149
             if self._execution_service is None:
    
    128
    -            self._execution_service = ExecutionService(self.__grpc_server)
    
    150
    +            self._execution_service = ExecutionService(
    
    151
    +                self.__grpc_server, monitor=self._is_instrumented)
    
    129 152
     
    
    130 153
             self._execution_service.add_instance(instance_name, instance)
    
    154
    +        self._add_capabilities_instance(instance_name, execution_instance=instance)
    
    155
    +
    
    156
    +        self._schedulers[instance_name] = instance.scheduler
    
    157
    +        self._instances.add(instance_name)
    
    131 158
     
    
    132 159
         def add_bots_interface(self, instance, instance_name):
    
    133 160
             """Adds a :obj:`BotsInterface` to the service.
    
    ... ... @@ -139,10 +166,13 @@ class BuildGridServer:
    139 166
                 instance_name (str): Instance name.
    
    140 167
             """
    
    141 168
             if self._bots_service is None:
    
    142
    -            self._bots_service = BotsService(self.__grpc_server)
    
    169
    +            self._bots_service = BotsService(
    
    170
    +                self.__grpc_server, monitor=self._is_instrumented)
    
    143 171
     
    
    144 172
             self._bots_service.add_instance(instance_name, instance)
    
    145 173
     
    
    174
    +        self._instances.add(instance_name)
    
    175
    +
    
    146 176
         def add_operations_instance(self, instance, instance_name):
    
    147 177
             """Adds an :obj:`OperationsInstance` to the service.
    
    148 178
     
    
    ... ... @@ -184,9 +214,10 @@ class BuildGridServer:
    184 214
                 self._action_cache_service = ActionCacheService(self.__grpc_server)
    
    185 215
     
    
    186 216
             self._action_cache_service.add_instance(instance_name, instance)
    
    217
    +        self._add_capabilities_instance(instance_name, action_cache_instance=instance)
    
    187 218
     
    
    188 219
         def add_cas_instance(self, instance, instance_name):
    
    189
    -        """Stores a :obj:`ContentAddressableStorageInstance` to the service.
    
    220
    +        """Adds a :obj:`ContentAddressableStorageInstance` to the service.
    
    190 221
     
    
    191 222
             If no service exists, it creates one.
    
    192 223
     
    
    ... ... @@ -198,9 +229,10 @@ class BuildGridServer:
    198 229
                 self._cas_service = ContentAddressableStorageService(self.__grpc_server)
    
    199 230
     
    
    200 231
             self._cas_service.add_instance(instance_name, instance)
    
    232
    +        self._add_capabilities_instance(instance_name, cas_instance=instance)
    
    201 233
     
    
    202 234
         def add_bytestream_instance(self, instance, instance_name):
    
    203
    -        """Stores a :obj:`ByteStreamInstance` to the service.
    
    235
    +        """Adds a :obj:`ByteStreamInstance` to the service.
    
    204 236
     
    
    205 237
             If no service exists, it creates one.
    
    206 238
     
    
    ... ... @@ -218,3 +250,180 @@ class BuildGridServer:
    218 250
         @property
    
    219 251
         def is_instrumented(self):
    
    220 252
             return self._is_instrumented
    
    253
    +
    
    254
    +    # --- Private API ---
    
    255
    +
    
    256
    +    def _add_capabilities_instance(self, instance_name,
    
    257
    +                                   cas_instance=None,
    
    258
    +                                   action_cache_instance=None,
    
    259
    +                                   execution_instance=None):
    
    260
    +        """Adds a :obj:`CapabilitiesInstance` to the service.
    
    261
    +
    
    262
    +        Args:
    
    263
    +            instance_name (str): Instance name.
    
    264
    +            cas_instance/action_cache_instance/execution_instance: Optional instances to add capabilities for.
    
    265
    +        """
    
    266
    +
    
    267
    +        try:
    
    268
    +            if cas_instance:
    
    269
    +                self._capabilities_service.add_cas_instance(instance_name, cas_instance)
    
    270
    +            if action_cache_instance:
    
    271
    +                self._capabilities_service.add_action_cache_instance(instance_name, action_cache_instance)
    
    272
    +            if execution_instance:
    
    273
    +                self._capabilities_service.add_execution_instance(instance_name, execution_instance)
    
    274
    +
    
    275
    +        except KeyError:
    
    276
    +            capabilities_instance = CapabilitiesInstance(cas_instance,
    
    277
    +                                                         action_cache_instance,
    
    278
    +                                                         execution_instance)
    
    279
    +            self._capabilities_service.add_instance(instance_name, capabilities_instance)
    
    280
    +
    
    281
    +    async def _state_monitoring_worker(self, period=1.0):
    
    282
    +        """Periodically publishes state metrics to the monitoring bus."""
    
    283
    +        async def __state_monitoring_worker():
    
    284
    +            # Emit total clients count record:
    
    285
    +            _, record = self._query_n_clients()
    
    286
    +            await self.__monitoring_bus.send_record(record)
    
    287
    +
    
    288
    +            # Emit total bots count record:
    
    289
    +            _, record = self._query_n_bots()
    
    290
    +            await self.__monitoring_bus.send_record(record)
    
    291
    +
    
    292
    +            queue_times = []
    
    293
    +            # Emit records by instance:
    
    294
    +            for instance_name in self._instances:
    
    295
    +                # Emit instance clients count record:
    
    296
    +                _, record = self._query_n_clients_for_instance(instance_name)
    
    297
    +                await self.__monitoring_bus.send_record(record)
    
    298
    +
    
    299
    +                # Emit instance bots count record:
    
    300
    +                _, record = self._query_n_bots_for_instance(instance_name)
    
    301
    +                await self.__monitoring_bus.send_record(record)
    
    302
    +
    
    303
    +                # Emit instance average queue time record:
    
    304
    +                queue_time, record = self._query_am_queue_time_for_instance(instance_name)
    
    305
    +                await self.__monitoring_bus.send_record(record)
    
    306
    +                if queue_time:
    
    307
    +                    queue_times.append(queue_time)
    
    308
    +
    
    309
    +            # Emit records by bot status:
    
    310
    +            for bot_status in [BotStatus.OK, BotStatus.UNHEALTHY]:
    
    311
    +                # Emit status bots count record:
    
    312
    +                _, record = self._query_n_bots_for_status(bot_status)
    
    313
    +                await self.__monitoring_bus.send_record(record)
    
    314
    +
    
    315
    +            # Emit overall average queue time record:
    
    316
    +            if queue_times:
    
    317
    +                am_queue_time = sum(queue_times, timedelta()) / len(queue_times)
    
    318
    +            else:
    
    319
    +                am_queue_time = timedelta()
    
    320
    +            record = self._forge_timer_metric_record(
    
    321
    +                MetricRecordDomain.STATE,
    
    322
    +                'average-queue-time',
    
    323
    +                am_queue_time)
    
    324
    +
    
    325
    +            await self.__monitoring_bus.send_record(record)
    
    326
    +
    
    327
    +        try:
    
    328
    +            while True:
    
    329
    +                start = time.time()
    
    330
    +                await __state_monitoring_worker()
    
    331
    +
    
    332
    +                end = time.time()
    
    333
    +                await asyncio.sleep(period - (end - start))
    
    334
    +
    
    335
    +        except asyncio.CancelledError:
    
    336
    +            pass
    
    337
    +
    
    338
    +    def _forge_counter_metric_record(self, domain, name, count, metadata=None):
    
    339
    +        counter_record = monitoring_pb2.MetricRecord()
    
    340
    +
    
    341
    +        counter_record.creation_timestamp.GetCurrentTime()
    
    342
    +        counter_record.domain = domain.value
    
    343
    +        counter_record.type = MetricRecordType.COUNTER.value
    
    344
    +        counter_record.name = name
    
    345
    +        counter_record.count = count
    
    346
    +        if metadata is not None:
    
    347
    +            counter_record.metadata.update(metadata)
    
    348
    +
    
    349
    +        return counter_record
    
    350
    +
    
    351
    +    def _forge_timer_metric_record(self, domain, name, duration, metadata=None):
    
    352
    +        timer_record = monitoring_pb2.MetricRecord()
    
    353
    +
    
    354
    +        timer_record.creation_timestamp.GetCurrentTime()
    
    355
    +        timer_record.domain = domain.value
    
    356
    +        timer_record.type = MetricRecordType.TIMER.value
    
    357
    +        timer_record.name = name
    
    358
    +        timer_record.duration.FromTimedelta(duration)
    
    359
    +        if metadata is not None:
    
    360
    +            timer_record.metadata.update(metadata)
    
    361
    +
    
    362
    +        return timer_record
    
    363
    +
    
    364
    +    def _forge_gauge_metric_record(self, domain, name, value, metadata=None):
    
    365
    +        gauge_record = monitoring_pb2.MetricRecord()
    
    366
    +
    
    367
    +        gauge_record.creation_timestamp.GetCurrentTime()
    
    368
    +        gauge_record.domain = domain.value
    
    369
    +        gauge_record.type = MetricRecordType.GAUGE.value
    
    370
    +        gauge_record.name = name
    
    371
    +        gauge_record.value = value
    
    372
    +        if metadata is not None:
    
    373
    +            gauge_record.metadata.update(metadata)
    
    374
    +
    
    375
    +        return gauge_record
    
    376
    +
    
    377
    +    # --- Private API: Monitoring ---
    
    378
    +
    
    379
    +    def _query_n_clients(self):
    
    380
    +        """Queries the number of clients connected."""
    
    381
    +        n_clients = self._execution_service.query_n_clients()
    
    382
    +        gauge_record = self._forge_gauge_metric_record(
    
    383
    +            MetricRecordDomain.STATE, 'clients-count', n_clients)
    
    384
    +
    
    385
    +        return n_clients, gauge_record
    
    386
    +
    
    387
    +    def _query_n_clients_for_instance(self, instance_name):
    
    388
    +        """Queries the number of clients connected for a given instance."""
    
    389
    +        n_clients = self._execution_service.query_n_clients_for_instance(instance_name)
    
    390
    +        gauge_record = self._forge_gauge_metric_record(
    
    391
    +            MetricRecordDomain.STATE, 'clients-count', n_clients,
    
    392
    +            metadata={'instance-name': instance_name or 'void'})
    
    393
    +
    
    394
    +        return n_clients, gauge_record
    
    395
    +
    
    396
    +    def _query_n_bots(self):
    
    397
    +        """Queries the number of bots connected."""
    
    398
    +        n_bots = self._bots_service.query_n_bots()
    
    399
    +        gauge_record = self._forge_gauge_metric_record(
    
    400
    +            MetricRecordDomain.STATE, 'bots-count', n_bots)
    
    401
    +
    
    402
    +        return n_bots, gauge_record
    
    403
    +
    
    404
    +    def _query_n_bots_for_instance(self, instance_name):
    
    405
    +        """Queries the number of bots connected for a given instance."""
    
    406
    +        n_bots = self._bots_service.query_n_bots_for_instance(instance_name)
    
    407
    +        gauge_record = self._forge_gauge_metric_record(
    
    408
    +            MetricRecordDomain.STATE, 'bots-count', n_bots,
    
    409
    +            metadata={'instance-name': instance_name or 'void'})
    
    410
    +
    
    411
    +        return n_bots, gauge_record
    
    412
    +
    
    413
    +    def _query_n_bots_for_status(self, bot_status):
    
    414
    +        """Queries the number of bots connected for a given health status."""
    
    415
    +        n_bots = self._bots_service.query_n_bots_for_status(bot_status)
    
    416
    +        gauge_record = self._forge_gauge_metric_record(
    
    417
    +            MetricRecordDomain.STATE, 'bots-count', n_bots,
    
    418
    +            metadata={'bot-status': bot_status.name})
    
    419
    +
    
    420
    +        return n_bots, gauge_record
    
    421
    +
    
    422
    +    def _query_am_queue_time_for_instance(self, instance_name):
    
    423
    +        """Queries the average job's queue time for a given instance."""
    
    424
    +        am_queue_time = self._schedulers[instance_name].query_am_queue_time()
    
    425
    +        timer_record = self._forge_timer_metric_record(
    
    426
    +            MetricRecordDomain.STATE, 'average-queue-time', am_queue_time,
    
    427
    +            metadata={'instance-name': instance_name or 'void'})
    
    428
    +
    
    429
    +        return am_queue_time, timer_record

  • buildgrid/server/job.py
    ... ... @@ -13,10 +13,11 @@
    13 13
     # limitations under the License.
    
    14 14
     
    
    15 15
     
    
    16
    +from datetime import datetime
    
    16 17
     import logging
    
    17 18
     import uuid
    
    18 19
     
    
    19
    -from google.protobuf import timestamp_pb2
    
    20
    +from google.protobuf import duration_pb2, timestamp_pb2
    
    20 21
     
    
    21 22
     from buildgrid._enums import LeaseState, OperationStage
    
    22 23
     from buildgrid._exceptions import CancelledError
    
    ... ... @@ -40,6 +41,7 @@ class Job:
    40 41
             self.__operation_metadata = remote_execution_pb2.ExecuteOperationMetadata()
    
    41 42
     
    
    42 43
             self.__queued_timestamp = timestamp_pb2.Timestamp()
    
    44
    +        self.__queued_time_duration = duration_pb2.Duration()
    
    43 45
             self.__worker_start_timestamp = timestamp_pb2.Timestamp()
    
    44 46
             self.__worker_completed_timestamp = timestamp_pb2.Timestamp()
    
    45 47
     
    
    ... ... @@ -56,6 +58,8 @@ class Job:
    56 58
             self._operation.done = False
    
    57 59
             self._n_tries = 0
    
    58 60
     
    
    61
    +    # --- Public API ---
    
    62
    +
    
    59 63
         @property
    
    60 64
         def name(self):
    
    61 65
             return self._name
    
    ... ... @@ -193,7 +197,7 @@ class Job:
    193 197
                     result.Unpack(action_result)
    
    194 198
     
    
    195 199
                 action_metadata = action_result.execution_metadata
    
    196
    -            action_metadata.queued_timestamp.CopyFrom(self.__worker_start_timestamp)
    
    200
    +            action_metadata.queued_timestamp.CopyFrom(self.__queued_timestamp)
    
    197 201
                 action_metadata.worker_start_timestamp.CopyFrom(self.__worker_start_timestamp)
    
    198 202
                 action_metadata.worker_completed_timestamp.CopyFrom(self.__worker_completed_timestamp)
    
    199 203
     
    
    ... ... @@ -227,6 +231,10 @@ class Job:
    227 231
                     self.__queued_timestamp.GetCurrentTime()
    
    228 232
                 self._n_tries += 1
    
    229 233
     
    
    234
    +        elif self.__operation_metadata.stage == OperationStage.EXECUTING.value:
    
    235
    +            queue_in, queue_out = self.__queued_timestamp.ToDatetime(), datetime.now()
    
    236
    +            self.__queued_time_duration.FromTimedelta(queue_out - queue_in)
    
    237
    +
    
    230 238
             elif self.__operation_metadata.stage == OperationStage.COMPLETED.value:
    
    231 239
                 if self.__execute_response is not None:
    
    232 240
                     self._operation.response.Pack(self.__execute_response)
    
    ... ... @@ -260,3 +268,11 @@ class Job:
    260 268
             self.__execute_response.status.message = "Operation cancelled by client."
    
    261 269
     
    
    262 270
             self.update_operation_stage(OperationStage.COMPLETED)
    
    271
    +
    
    272
    +    # --- Public API: Monitoring ---
    
    273
    +
    
    274
    +    def query_queue_time(self):
    
    275
    +        return self.__queued_time_duration.ToTimedelta()
    
    276
    +
    
    277
    +    def query_n_retries(self):
    
    278
    +        return self._n_tries - 1 if self._n_tries > 0 else 0

  • buildgrid/server/operations/instance.py
    ... ... @@ -32,6 +32,10 @@ class OperationsInstance:
    32 32
     
    
    33 33
             self._scheduler = scheduler
    
    34 34
     
    
    35
    +    @property
    
    36
    +    def scheduler(self):
    
    37
    +        return self._scheduler
    
    38
    +
    
    35 39
         def register_instance_with_server(self, instance_name, server):
    
    36 40
             server.add_operations_instance(self, instance_name)
    
    37 41
     
    

  • buildgrid/server/operations/service.py
    ... ... @@ -38,8 +38,18 @@ class OperationsService(operations_pb2_grpc.OperationsServicer):
    38 38
     
    
    39 39
             operations_pb2_grpc.add_OperationsServicer_to_server(self, server)
    
    40 40
     
    
    41
    -    def add_instance(self, name, instance):
    
    42
    -        self._instances[name] = instance
    
    41
    +    # --- Public API ---
    
    42
    +
    
    43
    +    def add_instance(self, instance_name, instance):
    
    44
    +        """Registers a new servicer instance.
    
    45
    +
    
    46
    +        Args:
    
    47
    +            instance_name (str): The new instance's name.
    
    48
    +            instance (OperationsInstance): The new instance itself.
    
    49
    +        """
    
    50
    +        self._instances[instance_name] = instance
    
    51
    +
    
    52
    +    # --- Public API: Servicer ---
    
    43 53
     
    
    44 54
         def GetOperation(self, request, context):
    
    45 55
             self.__logger.debug("GetOperation request from [%s]", context.peer())
    
    ... ... @@ -127,6 +137,8 @@ class OperationsService(operations_pb2_grpc.OperationsServicer):
    127 137
     
    
    128 138
             return Empty()
    
    129 139
     
    
    140
    +    # --- Private API ---
    
    141
    +
    
    130 142
         def _parse_instance_name(self, name):
    
    131 143
             """ If the instance name is not blank, 'name' will have the form
    
    132 144
             {instance_name}/{operation_uuid}. Otherwise, it will just be
    

  • buildgrid/server/scheduler.py
    ... ... @@ -20,33 +20,70 @@ Schedules jobs.
    20 20
     """
    
    21 21
     
    
    22 22
     from collections import deque
    
    23
    +from datetime import timedelta
    
    23 24
     import logging
    
    24 25
     
    
    26
    +from buildgrid._enums import LeaseState, OperationStage
    
    25 27
     from buildgrid._exceptions import NotFoundError
    
    26 28
     
    
    27
    -from .job import OperationStage, LeaseState
    
    28
    -
    
    29 29
     
    
    30 30
     class Scheduler:
    
    31 31
     
    
    32 32
         MAX_N_TRIES = 5
    
    33 33
     
    
    34
    -    def __init__(self, action_cache=None):
    
    34
    +    def __init__(self, action_cache=None, monitor=False):
    
    35 35
             self.__logger = logging.getLogger(__name__)
    
    36 36
     
    
    37
    +        self.__operations_by_stage = None
    
    38
    +        self.__leases_by_state = None
    
    39
    +        self.__queue_time_average = None
    
    40
    +        self.__retries_count = 0
    
    41
    +
    
    37 42
             self._action_cache = action_cache
    
    38 43
             self.jobs = {}
    
    39 44
             self.queue = deque()
    
    40 45
     
    
    46
    +        self._is_instrumented = monitor
    
    47
    +
    
    48
    +        if self._is_instrumented:
    
    49
    +            self.__operations_by_stage = {}
    
    50
    +            self.__leases_by_state = {}
    
    51
    +            self.__queue_time_average = 0, timedelta()
    
    52
    +
    
    53
    +            self.__operations_by_stage[OperationStage.CACHE_CHECK] = set()
    
    54
    +            self.__operations_by_stage[OperationStage.QUEUED] = set()
    
    55
    +            self.__operations_by_stage[OperationStage.EXECUTING] = set()
    
    56
    +            self.__operations_by_stage[OperationStage.COMPLETED] = set()
    
    57
    +
    
    58
    +            self.__leases_by_state[LeaseState.PENDING] = set()
    
    59
    +            self.__leases_by_state[LeaseState.ACTIVE] = set()
    
    60
    +            self.__leases_by_state[LeaseState.COMPLETED] = set()
    
    61
    +
    
    62
    +    # --- Public API ---
    
    63
    +
    
    41 64
         def register_client(self, job_name, queue):
    
    42
    -        self.jobs[job_name].register_client(queue)
    
    65
    +        job = self.jobs[job_name]
    
    66
    +
    
    67
    +        job.register_client(queue)
    
    43 68
     
    
    44 69
         def unregister_client(self, job_name, queue):
    
    45
    -        self.jobs[job_name].unregister_client(queue)
    
    70
    +        job = self.jobs[job_name]
    
    46 71
     
    
    47
    -        if not self.jobs[job_name].n_clients and self.jobs[job_name].operation.done:
    
    72
    +        job.unregister_client(queue)
    
    73
    +
    
    74
    +        if not job.n_clients and job.operation.done:
    
    48 75
                 del self.jobs[job_name]
    
    49 76
     
    
    77
    +            if self._is_instrumented:
    
    78
    +                self.__operations_by_stage[OperationStage.CACHE_CHECK].discard(job_name)
    
    79
    +                self.__operations_by_stage[OperationStage.QUEUED].discard(job_name)
    
    80
    +                self.__operations_by_stage[OperationStage.EXECUTING].discard(job_name)
    
    81
    +                self.__operations_by_stage[OperationStage.COMPLETED].discard(job_name)
    
    82
    +
    
    83
    +                self.__leases_by_state[LeaseState.PENDING].discard(job_name)
    
    84
    +                self.__leases_by_state[LeaseState.ACTIVE].discard(job_name)
    
    85
    +                self.__leases_by_state[LeaseState.COMPLETED].discard(job_name)
    
    86
    +
    
    50 87
         def queue_job(self, job, skip_cache_lookup=False):
    
    51 88
             self.jobs[job.name] = job
    
    52 89
     
    
    ... ... @@ -62,23 +99,30 @@ class Scheduler:
    62 99
                     job.set_cached_result(action_result)
    
    63 100
                     operation_stage = OperationStage.COMPLETED
    
    64 101
     
    
    102
    +                if self._is_instrumented:
    
    103
    +                    self.__retries_count += 1
    
    104
    +
    
    65 105
             else:
    
    66 106
                 operation_stage = OperationStage.QUEUED
    
    67 107
                 self.queue.append(job)
    
    68 108
     
    
    69
    -        job.update_operation_stage(operation_stage)
    
    109
    +        self._update_job_operation_stage(job.name, operation_stage)
    
    70 110
     
    
    71 111
         def retry_job(self, job_name):
    
    72
    -        if job_name in self.jobs:
    
    73
    -            job = self.jobs[job_name]
    
    74
    -            if job.n_tries >= self.MAX_N_TRIES:
    
    75
    -                # TODO: Decide what to do with these jobs
    
    76
    -                job.update_operation_stage(OperationStage.COMPLETED)
    
    77
    -                # TODO: Mark these jobs as done
    
    78
    -            else:
    
    79
    -                job.update_operation_stage(OperationStage.QUEUED)
    
    80
    -                job.update_lease_state(LeaseState.PENDING)
    
    81
    -                self.queue.append(job)
    
    112
    +        job = self.jobs[job_name]
    
    113
    +
    
    114
    +        operation_stage = None
    
    115
    +        if job.n_tries >= self.MAX_N_TRIES:
    
    116
    +            # TODO: Decide what to do with these jobs
    
    117
    +            operation_stage = OperationStage.COMPLETED
    
    118
    +            # TODO: Mark these jobs as done
    
    119
    +
    
    120
    +        else:
    
    121
    +            operation_stage = OperationStage.QUEUED
    
    122
    +            job.update_lease_state(LeaseState.PENDING)
    
    123
    +            self.queue.append(job)
    
    124
    +
    
    125
    +        self._update_job_operation_stage(job_name, operation_stage)
    
    82 126
     
    
    83 127
         def list_jobs(self):
    
    84 128
             return self.jobs.values()
    
    ... ... @@ -118,17 +162,27 @@ class Scheduler:
    118 162
                 lease_result (google.protobuf.Any): the lease execution result, only
    
    119 163
                     required if `lease_state` is `COMPLETED`.
    
    120 164
             """
    
    121
    -
    
    122 165
             job = self.jobs[lease.id]
    
    123 166
             lease_state = LeaseState(lease.state)
    
    124 167
     
    
    168
    +        operation_stage = None
    
    125 169
             if lease_state == LeaseState.PENDING:
    
    126 170
                 job.update_lease_state(LeaseState.PENDING)
    
    127
    -            job.update_operation_stage(OperationStage.QUEUED)
    
    171
    +            operation_stage = OperationStage.QUEUED
    
    172
    +
    
    173
    +            if self._is_instrumented:
    
    174
    +                self.__leases_by_state[LeaseState.PENDING].add(lease.id)
    
    175
    +                self.__leases_by_state[LeaseState.ACTIVE].discard(lease.id)
    
    176
    +                self.__leases_by_state[LeaseState.COMPLETED].discard(lease.id)
    
    128 177
     
    
    129 178
             elif lease_state == LeaseState.ACTIVE:
    
    130 179
                 job.update_lease_state(LeaseState.ACTIVE)
    
    131
    -            job.update_operation_stage(OperationStage.EXECUTING)
    
    180
    +            operation_stage = OperationStage.EXECUTING
    
    181
    +
    
    182
    +            if self._is_instrumented:
    
    183
    +                self.__leases_by_state[LeaseState.PENDING].discard(lease.id)
    
    184
    +                self.__leases_by_state[LeaseState.ACTIVE].add(lease.id)
    
    185
    +                self.__leases_by_state[LeaseState.COMPLETED].discard(lease.id)
    
    132 186
     
    
    133 187
             elif lease_state == LeaseState.COMPLETED:
    
    134 188
                 job.update_lease_state(LeaseState.COMPLETED,
    
    ... ... @@ -137,7 +191,14 @@ class Scheduler:
    137 191
                 if self._action_cache is not None and not job.do_not_cache:
    
    138 192
                     self._action_cache.update_action_result(job.action_digest, job.action_result)
    
    139 193
     
    
    140
    -            job.update_operation_stage(OperationStage.COMPLETED)
    
    194
    +            operation_stage = OperationStage.COMPLETED
    
    195
    +
    
    196
    +            if self._is_instrumented:
    
    197
    +                self.__leases_by_state[LeaseState.PENDING].discard(lease.id)
    
    198
    +                self.__leases_by_state[LeaseState.ACTIVE].discard(lease.id)
    
    199
    +                self.__leases_by_state[LeaseState.COMPLETED].add(lease.id)
    
    200
    +
    
    201
    +        self._update_job_operation_stage(lease.id, operation_stage)
    
    141 202
     
    
    142 203
         def get_job_lease(self, job_name):
    
    143 204
             """Returns the lease associated to job, if any have been emitted yet."""
    
    ... ... @@ -160,3 +221,101 @@ class Scheduler:
    160 221
                 job_name (str): name of the job holding the operation to cancel.
    
    161 222
             """
    
    162 223
             self.jobs[job_name].cancel_operation()
    
    224
    +
    
    225
    +    # --- Public API: Monitoring ---
    
    226
    +
    
    227
    +    @property
    
    228
    +    def is_instrumented(self):
    
    229
    +        return self._is_instrumented
    
    230
    +
    
    231
    +    def query_n_jobs(self):
    
    232
    +        return len(self.jobs)
    
    233
    +
    
    234
    +    def query_n_operations(self):
    
    235
    +        # For now n_operations == n_jobs:
    
    236
    +        return len(self.jobs)
    
    237
    +
    
    238
    +    def query_n_operations_by_stage(self, operation_stage):
    
    239
    +        try:
    
    240
    +            if self.__operations_by_stage is not None:
    
    241
    +                return len(self.__operations_by_stage[operation_stage])
    
    242
    +        except KeyError:
    
    243
    +            pass
    
    244
    +        return 0
    
    245
    +
    
    246
    +    def query_n_leases(self):
    
    247
    +        return len(self.jobs)
    
    248
    +
    
    249
    +    def query_n_leases_by_state(self, lease_state):
    
    250
    +        try:
    
    251
    +            if self.__leases_by_state is not None:
    
    252
    +                return len(self.__leases_by_state[lease_state])
    
    253
    +        except KeyError:
    
    254
    +            pass
    
    255
    +        return 0
    
    256
    +
    
    257
    +    def query_n_retries(self):
    
    258
    +        return self.__retries_count
    
    259
    +
    
    260
    +    def query_am_queue_time(self):
    
    261
    +        if self.__queue_time_average is not None:
    
    262
    +            return self.__queue_time_average[1]
    
    263
    +        return timedelta()
    
    264
    +
    
    265
    +    # --- Private API ---
    
    266
    +
    
    267
    +    def _update_job_operation_stage(self, job_name, operation_stage):
    
    268
    +        """Requests a stage transition for the job's :class:Operations.
    
    269
    +
    
    270
    +        Args:
    
    271
    +            job_name (str): name of the job to query.
    
    272
    +            operation_stage (OperationStage): the stage to transition to.
    
    273
    +        """
    
    274
    +        job = self.jobs[job_name]
    
    275
    +
    
    276
    +        if operation_stage == OperationStage.CACHE_CHECK:
    
    277
    +            job.update_operation_stage(OperationStage.CACHE_CHECK)
    
    278
    +
    
    279
    +            if self._is_instrumented:
    
    280
    +                self.__operations_by_stage[OperationStage.CACHE_CHECK].add(job_name)
    
    281
    +                self.__operations_by_stage[OperationStage.QUEUED].discard(job_name)
    
    282
    +                self.__operations_by_stage[OperationStage.EXECUTING].discard(job_name)
    
    283
    +                self.__operations_by_stage[OperationStage.COMPLETED].discard(job_name)
    
    284
    +
    
    285
    +        elif operation_stage == OperationStage.QUEUED:
    
    286
    +            job.update_operation_stage(OperationStage.QUEUED)
    
    287
    +
    
    288
    +            if self._is_instrumented:
    
    289
    +                self.__operations_by_stage[OperationStage.CACHE_CHECK].discard(job_name)
    
    290
    +                self.__operations_by_stage[OperationStage.QUEUED].add(job_name)
    
    291
    +                self.__operations_by_stage[OperationStage.EXECUTING].discard(job_name)
    
    292
    +                self.__operations_by_stage[OperationStage.COMPLETED].discard(job_name)
    
    293
    +
    
    294
    +        elif operation_stage == OperationStage.EXECUTING:
    
    295
    +            job.update_operation_stage(OperationStage.EXECUTING)
    
    296
    +
    
    297
    +            if self._is_instrumented:
    
    298
    +                self.__operations_by_stage[OperationStage.CACHE_CHECK].discard(job_name)
    
    299
    +                self.__operations_by_stage[OperationStage.QUEUED].discard(job_name)
    
    300
    +                self.__operations_by_stage[OperationStage.EXECUTING].add(job_name)
    
    301
    +                self.__operations_by_stage[OperationStage.COMPLETED].discard(job_name)
    
    302
    +
    
    303
    +        elif operation_stage == OperationStage.COMPLETED:
    
    304
    +            job.update_operation_stage(OperationStage.COMPLETED)
    
    305
    +
    
    306
    +            if self._is_instrumented:
    
    307
    +                self.__operations_by_stage[OperationStage.CACHE_CHECK].discard(job_name)
    
    308
    +                self.__operations_by_stage[OperationStage.QUEUED].discard(job_name)
    
    309
    +                self.__operations_by_stage[OperationStage.EXECUTING].discard(job_name)
    
    310
    +                self.__operations_by_stage[OperationStage.COMPLETED].add(job_name)
    
    311
    +
    
    312
    +                average_order, average_time = self.__queue_time_average
    
    313
    +
    
    314
    +                average_order += 1
    
    315
    +                if average_order <= 1:
    
    316
    +                    average_time = job.query_queue_time()
    
    317
    +                else:
    
    318
    +                    queue_time = job.query_queue_time()
    
    319
    +                    average_time = average_time + ((queue_time - average_time) / average_order)
    
    320
    +
    
    321
    +                self.__queue_time_average = average_order, average_time

  • buildgrid/settings.py
    1
    +# Copyright (C) 2018 Bloomberg LP
    
    2
    +#
    
    3
    +# Licensed under the Apache License, Version 2.0 (the "License");
    
    4
    +# you may not use this file except in compliance with the License.
    
    5
    +# You may obtain a copy of the License at
    
    6
    +#
    
    7
    +#  <http://www.apache.org/licenses/LICENSE-2.0>
    
    8
    +#
    
    9
    +# Unless required by applicable law or agreed to in writing, software
    
    10
    +# distributed under the License is distributed on an "AS IS" BASIS,
    
    11
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    
    12
    +# See the License for the specific language governing permissions and
    
    13
    +# limitations under the License.
    
    14
    +
    
    15
    +
    
    1 16
     import hashlib
    
    2 17
     
    
    3 18
     
    
    4
    -# The hash function that CAS uses
    
    19
    +# Hash function used for computing digests:
    
    5 20
     HASH = hashlib.sha256
    
    21
    +
    
    22
     +# Length in bytes of a hash string returned by HASH:
    
    6 23
     HASH_LENGTH = HASH().digest_size * 2
    
    24
    +
    
    25
    +# Period, in seconds, for the monitoring cycle:
    
    26
    +MONITORING_PERIOD = 5.0

  • buildgrid/utils.py
    ... ... @@ -30,6 +30,14 @@ def get_hostname():
    30 30
         return socket.gethostname()
    
    31 31
     
    
    32 32
     
    
    33
    +def get_hash_type():
    
    34
    +    """Returns the hash type."""
    
    35
    +    hash_name = HASH().name
    
    36
    +    if hash_name == "sha256":
    
    37
    +        return remote_execution_pb2.SHA256
    
    38
    +    return remote_execution_pb2.UNKNOWN
    
    39
    +
    
    40
    +
    
    33 41
     def create_digest(bytes_to_digest):
    
    34 42
         """Computes the :obj:`Digest` of a piece of data.
    
    35 43
     
    

  • setup.py
    ... ... @@ -112,13 +112,15 @@ setup(
    112 112
         license="Apache License, Version 2.0",
    
    113 113
         description="A remote execution service",
    
    114 114
         packages=find_packages(),
    
    115
    +    python_requires='>= 3.5.3',  # janus requirement
    
    115 116
         install_requires=[
    
    116
    -        'protobuf',
    
    117
    -        'grpcio',
    
    118
    -        'Click',
    
    119
    -        'PyYAML',
    
    120 117
             'boto3 < 1.8.0',
    
    121 118
             'botocore < 1.11.0',
    
    119
    +        'click',
    
    120
    +        'grpcio',
    
    121
    +        'janus',
    
    122
    +        'protobuf',
    
    123
    +        'pyyaml',
    
    122 124
         ],
    
    123 125
         entry_points={
    
    124 126
             'console_scripts': [
    

  • tests/integration/capabilities_service.py
    1
    +# Copyright (C) 2018 Bloomberg LP
    
    2
    +#
    
    3
    +# Licensed under the Apache License, Version 2.0 (the "License");
    
    4
    +# you may not use this file except in compliance with the License.
    
    5
    +# You may obtain a copy of the License at
    
    6
    +#
    
    7
    +#  <http://www.apache.org/licenses/LICENSE-2.0>
    
    8
    +#
    
    9
    +# Unless required by applicable law or agreed to in writing, software
    
    10
    +# distributed under the License is distributed on an "AS IS" BASIS,
    
    11
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    
    12
    +# See the License for the specific language governing permissions and
    
    13
    +# limitations under the License.
    
    14
    +
    
    15
    +# pylint: disable=redefined-outer-name
    
    16
    +
    
    17
    +
    
    18
    +import grpc
    
    19
    +import pytest
    
    20
    +
    
    21
    +from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
    
    22
    +from buildgrid.client.capabilities import CapabilitiesInterface
    
    23
    +from buildgrid.server.controller import ExecutionController
    
    24
    +from buildgrid.server.actioncache.storage import ActionCache
    
    25
    +from buildgrid.server.cas.instance import ContentAddressableStorageInstance
    
    26
    +from buildgrid.server.cas.storage.lru_memory_cache import LRUMemoryCache
    
    27
    +
    
    28
    +from ..utils.utils import run_in_subprocess
    
    29
    +from ..utils.capabilities import serve_capabilities_service
    
    30
    +
    
    31
    +
    
    32
    +INSTANCES = ['', 'instance']
    
    33
    +
    
    34
    +
    
    35
    +# Use subprocess to avoid creation of gRPC threads in main process
    
    36
    +# See https://github.com/grpc/grpc/blob/master/doc/fork_support.md
    
    37
    +# Multiprocessing uses pickle which protobufs don't work with
    
    38
    +# Workaround wrapper to send messages as strings
    
    39
    +class ServerInterface:
    
    40
    +
    
    41
    +    def __init__(self, remote):
    
    42
    +        self.__remote = remote
    
    43
    +
    
    44
    +    def get_capabilities(self, instance_name):
    
    45
    +
    
    46
    +        def __get_capabilities(queue, remote, instance_name):
    
    47
    +            interface = CapabilitiesInterface(grpc.insecure_channel(remote))
    
    48
    +
    
    49
    +            result = interface.get_capabilities(instance_name)
    
    50
    +            queue.put(result.SerializeToString())
    
    51
    +
    
    52
    +        result = run_in_subprocess(__get_capabilities,
    
    53
    +                                   self.__remote, instance_name)
    
    54
    +
    
    55
    +        capabilities = remote_execution_pb2.ServerCapabilities()
    
    56
    +        capabilities.ParseFromString(result)
    
    57
    +        return capabilities
    
    58
    +
    
    59
    +
    
    60
    +@pytest.mark.parametrize('instance', INSTANCES)
    
    61
    +def test_execution_not_available_capabilities(instance):
    
    62
    +    with serve_capabilities_service([instance]) as server:
    
    63
    +        server_interface = ServerInterface(server.remote)
    
    64
    +        response = server_interface.get_capabilities(instance)
    
    65
    +
    
    66
    +        assert not response.execution_capabilities.exec_enabled
    
    67
    +
    
    68
    +
    
    69
    +@pytest.mark.parametrize('instance', INSTANCES)
    
    70
    +def test_execution_available_capabilities(instance):
    
    71
    +    controller = ExecutionController()
    
    72
    +
    
    73
    +    with serve_capabilities_service([instance],
    
    74
    +                                    execution_instance=controller.execution_instance) as server:
    
    75
    +        server_interface = ServerInterface(server.remote)
    
    76
    +        response = server_interface.get_capabilities(instance)
    
    77
    +
    
    78
    +        assert response.execution_capabilities.exec_enabled
    
    79
    +        assert response.execution_capabilities.digest_function
    
    80
    +
    
    81
    +
    
    82
    +@pytest.mark.parametrize('instance', INSTANCES)
    
    83
    +def test_action_cache_allow_updates_capabilities(instance):
    
    84
    +    storage = LRUMemoryCache(limit=256)
    
    85
    +    action_cache = ActionCache(storage, max_cached_refs=256, allow_updates=True)
    
    86
    +
    
    87
    +    with serve_capabilities_service([instance],
    
    88
    +                                    action_cache_instance=action_cache) as server:
    
    89
    +        server_interface = ServerInterface(server.remote)
    
    90
    +        response = server_interface.get_capabilities(instance)
    
    91
    +
    
    92
    +        assert response.cache_capabilities.action_cache_update_capabilities.update_enabled
    
    93
    +
    
    94
    +
    
    95
    +@pytest.mark.parametrize('instance', INSTANCES)
    
    96
    +def test_action_cache_not_allow_updates_capabilities(instance):
    
    97
    +    storage = LRUMemoryCache(limit=256)
    
    98
    +    action_cache = ActionCache(storage, max_cached_refs=256, allow_updates=False)
    
    99
    +
    
    100
    +    with serve_capabilities_service([instance],
    
    101
    +                                    action_cache_instance=action_cache) as server:
    
    102
    +        server_interface = ServerInterface(server.remote)
    
    103
    +        response = server_interface.get_capabilities(instance)
    
    104
    +
    
    105
    +        assert not response.cache_capabilities.action_cache_update_capabilities.update_enabled
    
    106
    +
    
    107
    +
    
    108
    +@pytest.mark.parametrize('instance', INSTANCES)
    
    109
    +def test_cas_capabilities(instance):
    
    110
    +    cas = ContentAddressableStorageInstance(None)
    
    111
    +
    
    112
    +    with serve_capabilities_service([instance],
    
    113
    +                                    cas_instance=cas) as server:
    
    114
    +        server_interface = ServerInterface(server.remote)
    
    115
    +        response = server_interface.get_capabilities(instance)
    
    116
    +
    
    117
    +        assert len(response.cache_capabilities.digest_function) == 1
    
    118
    +        assert response.cache_capabilities.digest_function[0]
    
    119
    +        assert response.cache_capabilities.symlink_absolute_path_strategy
    
    120
    +        assert response.cache_capabilities.max_batch_total_size_bytes

  • tests/utils/capabilities.py
    1
    +# Copyright (C) 2018 Bloomberg LP
    
    2
    +#
    
    3
    +# Licensed under the Apache License, Version 2.0 (the "License");
    
    4
    +# you may not use this file except in compliance with the License.
    
    5
    +# You may obtain a copy of the License at
    
    6
    +#
    
    7
    +#  <http://www.apache.org/licenses/LICENSE-2.0>
    
    8
    +#
    
    9
    +# Unless required by applicable law or agreed to in writing, software
    
    10
    +# distributed under the License is distributed on an "AS IS" BASIS,
    
    11
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    
    12
    +# See the License for the specific language governing permissions and
    
    13
    +# limitations under the License.
    
    14
    +
    
    15
    +
    
    16
    +from concurrent import futures
    
    17
    +from contextlib import contextmanager
    
    18
    +import multiprocessing
    
    19
    +import os
    
    20
    +import signal
    
    21
    +
    
    22
    +import grpc
    
    23
    +import pytest_cov
    
    24
    +
    
    25
    +from buildgrid.server.capabilities.service import CapabilitiesService
    
    26
    +from buildgrid.server.capabilities.instance import CapabilitiesInstance
    
    27
    +
    
    28
    +
    
    29
    +@contextmanager
    
    30
    +def serve_capabilities_service(instances,
    
    31
    +                               cas_instance=None,
    
    32
    +                               action_cache_instance=None,
    
    33
    +                               execution_instance=None):
    
    34
    +    server = Server(instances,
    
    35
    +                    cas_instance,
    
    36
    +                    action_cache_instance,
    
    37
    +                    execution_instance)
    
    38
    +    try:
    
    39
    +        yield server
    
    40
    +    finally:
    
    41
    +        server.quit()
    
    42
    +
    
    43
    +
    
    44
    +class Server:
    
    45
    +
    
    46
    +    def __init__(self, instances,
    
    47
    +                 cas_instance=None,
    
    48
    +                 action_cache_instance=None,
    
    49
    +                 execution_instance=None):
    
    50
    +        self.instances = instances
    
    51
    +
    
    52
    +        self.__queue = multiprocessing.Queue()
    
    53
    +        self.__process = multiprocessing.Process(
    
    54
    +            target=Server.serve,
    
    55
    +            args=(self.__queue, self.instances, cas_instance, action_cache_instance, execution_instance))
    
    56
    +        self.__process.start()
    
    57
    +
    
    58
    +        self.port = self.__queue.get(timeout=1)
    
    59
    +        self.remote = 'localhost:{}'.format(self.port)
    
    60
    +
    
    61
    +    @staticmethod
    
    62
    +    def serve(queue, instances, cas_instance, action_cache_instance, execution_instance):
    
    63
    +        pytest_cov.embed.cleanup_on_sigterm()
    
    64
    +
    
    65
    +        # Use max_workers default from Python 3.5+
    
    66
    +        max_workers = (os.cpu_count() or 1) * 5
    
    67
    +        server = grpc.server(futures.ThreadPoolExecutor(max_workers))
    
    68
    +        port = server.add_insecure_port('localhost:0')
    
    69
    +
    
    70
    +        capabilities_service = CapabilitiesService(server)
    
    71
    +        for name in instances:
    
    72
    +            capabilities_instance = CapabilitiesInstance(cas_instance, action_cache_instance, execution_instance)
    
    73
    +            capabilities_service.add_instance(name, capabilities_instance)
    
    74
    +
    
    75
    +        server.start()
    
    76
    +        queue.put(port)
    
    77
    +        signal.pause()
    
    78
    +
    
    79
    +    def quit(self):
    
    80
    +        if self.__process:
    
    81
    +            self.__process.terminate()
    
    82
    +            self.__process.join()



  • [Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]