[Notes] [Git][BuildGrid/buildgrid][mablanch/139-emit-build-metrics] 18 commits: execution/service.py: Calculate client counts



Title: GitLab

Martin Blanchard pushed to branch mablanch/139-emit-build-metrics at BuildGrid / buildgrid

Commits:

17 changed files:

Changes:

  • .gitlab-ci.yml
    ... ... @@ -2,7 +2,7 @@
    2 2
     image: python:3.5-stretch
    
    3 3
     
    
    4 4
     variables:
    
    5
    -  BGD: bgd --verbose
    
    5
    +  BGD: bgd
    
    6 6
     
    
    7 7
     stages:
    
    8 8
       - test
    

  • .pylintrc
    ... ... @@ -185,6 +185,7 @@ ignore-on-opaque-inference=yes
    185 185
     # for classes with dynamically set attributes). This supports the use of
    
    186 186
     # qualified names.
    
    187 187
     ignored-classes=google.protobuf.any_pb2.Any,
    
    188
    +                google.protobuf.duration_pb2.Duration,
    
    188 189
                     google.protobuf.timestamp_pb2.Timestamp
    
    189 190
     
    
    190 191
     # List of module names for which member attributes should not be checked
    
    ... ... @@ -460,6 +461,7 @@ known-third-party=boto3,
    460 461
                       enchant,
    
    461 462
                       google,
    
    462 463
                       grpc,
    
    464
    +                  janus,
    
    463 465
                       moto,
    
    464 466
                       yaml
    
    465 467
     
    

  • buildgrid/_app/cli.py
    ... ... @@ -23,10 +23,12 @@ will be attempted to be imported.
    23 23
     
    
    24 24
     import logging
    
    25 25
     import os
    
    26
    +import sys
    
    26 27
     
    
    27 28
     import click
    
    28 29
     import grpc
    
    29 30
     
    
    31
    +from buildgrid.settings import LOG_RECORD_FORMAT
    
    30 32
     from buildgrid.utils import read_file
    
    31 33
     
    
    32 34
     CONTEXT_SETTINGS = dict(auto_envvar_prefix='BUILDGRID')
    
    ... ... @@ -138,28 +140,70 @@ class BuildGridCLI(click.MultiCommand):
    138 140
             return mod.cli
    
    139 141
     
    
    140 142
     
    
    143
    +class DebugFilter(logging.Filter):
    
    144
    +
    
    145
    +    def __init__(self, debug_domains, name=''):
    
    146
    +        super().__init__(name=name)
    
    147
    +        self.__domains_tree = {}
    
    148
    +
    
    149
    +        for domain in debug_domains.split(','):
    
    150
    +            domains_tree = self.__domains_tree
    
    151
    +            for label in domain.split('.'):
    
    152
    +                if label not in domains_tree:
    
    153
    +                    domains_tree[label] = {}
    
    154
    +                domains_tree = domains_tree[label]
    
    155
    +
    
    156
    +    def filter(self, record):
    
    157
    +        domains_tree = self.__domains_tree
    
    158
    +        for label in record.name.split('.'):
    
    159
    +            if '*' in domains_tree:
    
    160
    +                return True
    
    161
    +            if label not in domains_tree:
    
    162
    +                return False
    
    163
    +            domains_tree = domains_tree[label]
    
    164
    +        return True
    
    165
    +
    
    166
    +
    
    167
    +def setup_logging(verbosity=0, debug_mode=False):
    
    168
    +    """Deals with loggers verbosity"""
    
    169
    +    asyncio_logger = logging.getLogger('asyncio')
    
    170
    +    root_logger = logging.getLogger()
    
    171
    +
    
    172
    +    log_handler = logging.StreamHandler(stream=sys.stdout)
    
    173
    +    for log_filter in root_logger.filters:
    
    174
    +        log_handler.addFilter(log_filter)
    
    175
    +
    
    176
    +    logging.basicConfig(format=LOG_RECORD_FORMAT, handlers=[log_handler])
    
    177
    +
    
    178
    +    if verbosity == 1:
    
    179
    +        root_logger.setLevel(logging.WARNING)
    
    180
    +    elif verbosity == 2:
    
    181
    +        root_logger.setLevel(logging.INFO)
    
    182
    +    elif verbosity >= 3:
    
    183
    +        root_logger.setLevel(logging.DEBUG)
    
    184
    +    else:
    
    185
    +        root_logger.setLevel(logging.ERROR)
    
    186
    +
    
    187
    +    if not debug_mode:
    
    188
    +        asyncio_logger.setLevel(logging.CRITICAL)
    
    189
    +    else:
    
    190
    +        asyncio_logger.setLevel(logging.DEBUG)
    
    191
    +        root_logger.setLevel(logging.DEBUG)
    
    192
    +
    
    193
    +
    
    141 194
     @click.command(cls=BuildGridCLI, context_settings=CONTEXT_SETTINGS)
    
    142
    -@click.option('-v', '--verbose', count=True,
    
    143
    -              help='Increase log verbosity level.')
    
    144 195
     @pass_context
    
    145
    -def cli(context, verbose):
    
    196
    +def cli(context):
    
    146 197
         """BuildGrid App"""
    
    147
    -    logger = logging.getLogger()
    
    198
    +    root_logger = logging.getLogger()
    
    148 199
     
    
    149 200
         # Clean-up root logger for any pre-configuration:
    
    150
    -    for log_handler in logger.handlers[:]:
    
    151
    -        logger.removeHandler(log_handler)
    
    152
    -    for log_filter in logger.filters[:]:
    
    153
    -        logger.removeFilter(log_filter)
    
    154
    -
    
    155
    -    logging.basicConfig(
    
    156
    -        format='%(asctime)s:%(name)32.32s][%(levelname)5.5s]: %(message)s')
    
    157
    -
    
    158
    -    if verbose == 1:
    
    159
    -        logger.setLevel(logging.WARNING)
    
    160
    -    elif verbose == 2:
    
    161
    -        logger.setLevel(logging.INFO)
    
    162
    -    elif verbose >= 3:
    
    163
    -        logger.setLevel(logging.DEBUG)
    
    164
    -    else:
    
    165
    -        logger.setLevel(logging.ERROR)
    201
    +    for log_handler in root_logger.handlers[:]:
    
    202
    +        root_logger.removeHandler(log_handler)
    
    203
    +    for log_filter in root_logger.filters[:]:
    
    204
    +        root_logger.removeFilter(log_filter)
    
    205
    +
    
    206
    +    # Filter debug messages using BGD_MESSAGE_DEBUG value:
    
    207
    +    debug_domains = os.environ.get('BGD_MESSAGE_DEBUG', None)
    
    208
    +    if debug_domains:
    
    209
    +        root_logger.addFilter(DebugFilter(debug_domains))

  • buildgrid/_app/commands/cmd_bot.py
    ... ... @@ -34,7 +34,7 @@ from buildgrid.bot.hardware.worker import Worker
    34 34
     
    
    35 35
     
    
    36 36
     from ..bots import buildbox, dummy, host
    
    37
    -from ..cli import pass_context
    
    37
    +from ..cli import pass_context, setup_logging
    
    38 38
     
    
    39 39
     
    
    40 40
     @click.group(name='bot', short_help="Create and register bot clients.")
    
    ... ... @@ -58,9 +58,12 @@ from ..cli import pass_context
    58 58
                   help="Time period for bot updates to the server in seconds.")
    
    59 59
     @click.option('--parent', type=click.STRING, default='main', show_default=True,
    
    60 60
                   help="Targeted farm resource.")
    
    61
    +@click.option('-v', '--verbose', count=True,
    
    62
    +              help='Increase log verbosity level.')
    
    61 63
     @pass_context
    
    62 64
     def cli(context, parent, update_period, remote, client_key, client_cert, server_cert,
    
    63
    -        remote_cas, cas_client_key, cas_client_cert, cas_server_cert):
    
    65
    +        remote_cas, cas_client_key, cas_client_cert, cas_server_cert, verbose):
    
    66
    +    setup_logging(verbosity=verbose)
    
    64 67
         # Setup the remote execution server channel:
    
    65 68
         url = urlparse(remote)
    
    66 69
     
    
    ... ... @@ -122,9 +125,8 @@ def cli(context, parent, update_period, remote, client_key, client_cert, server_
    122 125
             context.cas_client_cert = context.client_cert
    
    123 126
             context.cas_server_cert = context.server_cert
    
    124 127
     
    
    125
    -    click.echo("Starting for remote=[{}]".format(context.remote))
    
    126
    -
    
    127 128
         bot_interface = interface.BotInterface(context.channel)
    
    129
    +
    
    128 130
         worker = Worker()
    
    129 131
         worker.add_device(Device())
    
    130 132
         hardware_interface = HardwareInterface(worker)
    

  • buildgrid/_app/commands/cmd_server.py
    ... ... @@ -26,7 +26,7 @@ import click
    26 26
     
    
    27 27
     from buildgrid.server.instance import BuildGridServer
    
    28 28
     
    
    29
    -from ..cli import pass_context
    
    29
    +from ..cli import pass_context, setup_logging
    
    30 30
     from ..settings import parser
    
    31 31
     
    
    32 32
     
    
    ... ... @@ -37,9 +37,14 @@ def cli(context):
    37 37
     
    
    38 38
     
    
    39 39
     @cli.command('start', short_help="Setup a new server instance.")
    
    40
    -@click.argument('CONFIG', type=click.Path(file_okay=True, dir_okay=False, writable=False))
    
    40
    +@click.argument('CONFIG',
    
    41
    +                type=click.Path(file_okay=True, dir_okay=False, writable=False))
    
    42
    +@click.option('-v', '--verbose', count=True,
    
    43
    +              help='Increase log verbosity level.')
    
    41 44
     @pass_context
    
    42
    -def start(context, config):
    
    45
    +def start(context, config, verbose):
    
    46
    +    setup_logging(verbosity=verbose)
    
    47
    +
    
    43 48
         with open(config) as f:
    
    44 49
             settings = parser.get_parser().safe_load(f)
    
    45 50
     
    

  • buildgrid/server/_monitoring.py
    ... ... @@ -156,9 +156,11 @@ class MonitoringBus:
    156 156
                     output_writers.append(output_file)
    
    157 157
     
    
    158 158
                     while True:
    
    159
    -                    if await __streaming_worker(iter(output_file)):
    
    159
    +                    if await __streaming_worker([output_file]):
    
    160 160
                             self.__sequence_number += 1
    
    161 161
     
    
    162
    +                        output_file.flush()
    
    163
    +
    
    162 164
                 else:
    
    163 165
                     output_writers.append(sys.stdout.buffer)
    
    164 166
     
    

  • buildgrid/server/bots/instance.py
    ... ... @@ -37,6 +37,10 @@ class BotsInterface:
    37 37
             self._assigned_leases = {}
    
    38 38
             self._scheduler = scheduler
    
    39 39
     
    
    40
    +    @property
    
    41
    +    def scheduler(self):
    
    42
    +        return self._scheduler
    
    43
    +
    
    40 44
         def register_instance_with_server(self, instance_name, server):
    
    41 45
             server.add_bots_interface(self, instance_name)
    
    42 46
     
    

  • buildgrid/server/bots/service.py
    ... ... @@ -23,8 +23,9 @@ import logging
    23 23
     
    
    24 24
     import grpc
    
    25 25
     
    
    26
    -from google.protobuf.empty_pb2 import Empty
    
    26
    +from google.protobuf import empty_pb2, timestamp_pb2
    
    27 27
     
    
    28
    +from buildgrid._enums import BotStatus
    
    28 29
     from buildgrid._exceptions import InvalidArgumentError, OutOfSyncError
    
    29 30
     from buildgrid._protos.google.devtools.remoteworkers.v1test2 import bots_pb2
    
    30 31
     from buildgrid._protos.google.devtools.remoteworkers.v1test2 import bots_pb2_grpc
    
    ... ... @@ -32,24 +33,86 @@ from buildgrid._protos.google.devtools.remoteworkers.v1test2 import bots_pb2_grp
    32 33
     
    
    33 34
     class BotsService(bots_pb2_grpc.BotsServicer):
    
    34 35
     
    
    35
    -    def __init__(self, server):
    
    36
    +    def __init__(self, server, monitor=False):
    
    36 37
             self.__logger = logging.getLogger(__name__)
    
    37 38
     
    
    39
    +        self.__bots_by_status = None
    
    40
    +        self.__bots_by_instance = None
    
    41
    +        self.__bots = None
    
    42
    +
    
    38 43
             self._instances = {}
    
    39 44
     
    
    40 45
             bots_pb2_grpc.add_BotsServicer_to_server(self, server)
    
    41 46
     
    
    42
    -    def add_instance(self, name, instance):
    
    43
    -        self._instances[name] = instance
    
    47
    +        self._is_instrumented = monitor
    
    48
    +
    
    49
    +        if self._is_instrumented:
    
    50
    +            self.__bots_by_status = {}
    
    51
    +            self.__bots_by_instance = {}
    
    52
    +            self.__bots = {}
    
    53
    +
    
    54
    +            self.__bots_by_status[BotStatus.OK] = set()
    
    55
    +            self.__bots_by_status[BotStatus.UNHEALTHY] = set()
    
    56
    +
    
    57
    +    # --- Public API ---
    
    58
    +
    
    59
    +    def add_instance(self, instance_name, instance):
    
    60
    +        """Registers a new servicer instance.
    
    61
    +
    
    62
    +        Args:
    
    63
    +            instance_name (str): The new instance's name.
    
    64
    +            instance (BotsInterface): The new instance itself.
    
    65
    +        """
    
    66
    +        self._instances[instance_name] = instance
    
    67
    +
    
    68
    +        if self._is_instrumented:
    
    69
    +            self.__bots_by_instance[instance_name] = set()
    
    70
    +
    
    71
    +    def get_scheduler(self, instance_name):
    
    72
    +        """Retrieves a reference to the scheduler for an instance.
    
    73
    +
    
    74
    +        Args:
    
    75
    +            instance_name (str): The name of the instance to query.
    
    76
    +
    
    77
    +        Returns:
    
    78
    +            Scheduler: A reference to the scheduler for `instance_name`.
    
    79
    +
    
    80
    +        Raises:
    
    81
    +            InvalidArgumentError: If no instance named `instance_name` exists.
    
    82
    +        """
    
    83
    +        instance = self._get_instance(instance_name)
    
    84
    +
    
    85
    +        return instance.scheduler
    
    86
    +
    
    87
    +    # --- Public API: Servicer ---
    
    44 88
     
    
    45 89
         def CreateBotSession(self, request, context):
    
    90
    +        """Handles CreateBotSessionRequest messages.
    
    91
    +
    
    92
    +        Args:
    
    93
    +            request (CreateBotSessionRequest): The incoming RPC request.
    
    94
    +            context (grpc.ServicerContext): Context for the RPC call.
    
    95
    +        """
    
    46 96
             self.__logger.debug("CreateBotSession request from [%s]", context.peer())
    
    47 97
     
    
    98
    +        instance_name = request.parent
    
    99
    +        bot_status = BotStatus(request.bot_session.status)
    
    100
    +        bot_id = request.bot_session.bot_id
    
    101
    +
    
    48 102
             try:
    
    49
    -            parent = request.parent
    
    50
    -            instance = self._get_instance(request.parent)
    
    51
    -            return instance.create_bot_session(parent,
    
    52
    -                                               request.bot_session)
    
    103
    +            instance = self._get_instance(instance_name)
    
    104
    +            bot_session = instance.create_bot_session(instance_name,
    
    105
    +                                                      request.bot_session)
    
    106
    +            now = timestamp_pb2.Timestamp()
    
    107
    +            now.GetCurrentTime()
    
    108
    +
    
    109
    +            if self._is_instrumented:
    
    110
    +                self.__bots[bot_id] = now
    
    111
    +                self.__bots_by_instance[instance_name].add(bot_id)
    
    112
    +                if bot_status in self.__bots_by_status:
    
    113
    +                    self.__bots_by_status[bot_status].add(bot_id)
    
    114
    +
    
    115
    +            return bot_session
    
    53 116
     
    
    54 117
             except InvalidArgumentError as e:
    
    55 118
                 self.__logger.error(e)
    
    ... ... @@ -59,17 +122,41 @@ class BotsService(bots_pb2_grpc.BotsServicer):
    59 122
             return bots_pb2.BotSession()
    
    60 123
     
    
    61 124
         def UpdateBotSession(self, request, context):
    
    125
    +        """Handles UpdateBotSessionRequest messages.
    
    126
    +
    
    127
    +        Args:
    
    128
    +            request (UpdateBotSessionRequest): The incoming RPC request.
    
    129
    +            context (grpc.ServicerContext): Context for the RPC call.
    
    130
    +        """
    
    62 131
             self.__logger.debug("UpdateBotSession request from [%s]", context.peer())
    
    63 132
     
    
    133
    +        names = request.name.split("/")
    
    134
    +        bot_status = BotStatus(request.bot_session.status)
    
    135
    +        bot_id = request.bot_session.bot_id
    
    136
    +
    
    64 137
             try:
    
    65
    -            names = request.name.split("/")
    
    66
    -            # Operation name should be in format:
    
    67
    -            # {instance/name}/{uuid}
    
    68
    -            instance_name = ''.join(names[0:-1])
    
    138
    +            instance_name = '/'.join(names[:-1])
    
    69 139
     
    
    70 140
                 instance = self._get_instance(instance_name)
    
    71
    -            return instance.update_bot_session(request.name,
    
    72
    -                                               request.bot_session)
    
    141
    +            bot_session = instance.update_bot_session(request.name,
    
    142
    +                                                      request.bot_session)
    
    143
    +
    
    144
    +            if self._is_instrumented:
    
    145
    +                self.__bots[bot_id].GetCurrentTime()
    
    146
    +                if bot_id not in self.__bots_by_status[bot_status]:
    
    147
    +                    if bot_status == BotStatus.OK:
    
    148
    +                        self.__bots_by_status[BotStatus.OK].add(bot_id)
    
    149
    +                        self.__bots_by_status[BotStatus.UNHEALTHY].discard(bot_id)
    
    150
    +
    
    151
    +                    elif bot_status == BotStatus.UNHEALTHY:
    
    152
    +                        self.__bots_by_status[BotStatus.OK].discard(bot_id)
    
    153
    +                        self.__bots_by_status[BotStatus.UNHEALTHY].add(bot_id)
    
    154
    +
    
    155
    +                    else:
    
    156
    +                        self.__bots_by_instance[instance_name].remove(bot_id)
    
    157
    +                        del self.__bots[bot_id]
    
    158
    +
    
    159
    +            return bot_session
    
    73 160
     
    
    74 161
             except InvalidArgumentError as e:
    
    75 162
                 self.__logger.error(e)
    
    ... ... @@ -89,10 +176,47 @@ class BotsService(bots_pb2_grpc.BotsServicer):
    89 176
             return bots_pb2.BotSession()
    
    90 177
     
    
    91 178
         def PostBotEventTemp(self, request, context):
    
    179
    +        """Handles PostBotEventTempRequest messages.
    
    180
    +
    
    181
    +        Args:
    
    182
    +            request (PostBotEventTempRequest): The incoming RPC request.
    
    183
    +            context (grpc.ServicerContext): Context for the RPC call.
    
    184
    +        """
    
    92 185
             self.__logger.debug("PostBotEventTemp request from [%s]", context.peer())
    
    93 186
     
    
    94 187
             context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    
    95
    -        return Empty()
    
    188
    +
    
    189
    +        return empty_pb2.Empty()
    
    190
    +
    
    191
    +    # --- Public API: Monitoring ---
    
    192
    +
    
    193
    +    @property
    
    194
    +    def is_instrumented(self):
    
    195
    +        return self._is_instrumented
    
    196
    +
    
    197
    +    def query_n_bots(self):
    
    198
    +        if self.__bots is not None:
    
    199
    +            return len(self.__bots)
    
    200
    +
    
    201
    +        return 0
    
    202
    +
    
    203
    +    def query_n_bots_for_instance(self, instance_name):
    
    204
    +        try:
    
    205
    +            if self.__bots_by_instance is not None:
    
    206
    +                return len(self.__bots_by_instance[instance_name])
    
    207
    +        except KeyError:
    
    208
    +            pass
    
    209
    +        return 0
    
    210
    +
    
    211
    +    def query_n_bots_for_status(self, bot_status):
    
    212
    +        try:
    
    213
    +            if self.__bots_by_status is not None:
    
    214
    +                return len(self.__bots_by_status[bot_status])
    
    215
    +        except KeyError:
    
    216
    +            pass
    
    217
    +        return 0
    
    218
    +
    
    219
    +    # --- Private API ---
    
    96 220
     
    
    97 221
         def _get_instance(self, name):
    
    98 222
             try:
    

  • buildgrid/server/execution/instance.py
    ... ... @@ -35,6 +35,10 @@ class ExecutionInstance:
    35 35
             self._storage = storage
    
    36 36
             self._scheduler = scheduler
    
    37 37
     
    
    38
    +    @property
    
    39
    +    def scheduler(self):
    
    40
    +        return self._scheduler
    
    41
    +
    
    38 42
         def register_instance_with_server(self, instance_name, server):
    
    39 43
             server.add_execution_instance(self, instance_name)
    
    40 44
     
    

  • buildgrid/server/execution/service.py
    ... ... @@ -33,30 +33,84 @@ from buildgrid._protos.google.longrunning import operations_pb2
    33 33
     
    
    34 34
     class ExecutionService(remote_execution_pb2_grpc.ExecutionServicer):
    
    35 35
     
    
    36
    -    def __init__(self, server):
    
    36
    +    def __init__(self, server, monitor=False):
    
    37 37
             self.__logger = logging.getLogger(__name__)
    
    38 38
     
    
    39
    +        self.__peers_by_instance = None
    
    40
    +        self.__peers = None
    
    41
    +
    
    39 42
             self._instances = {}
    
    43
    +
    
    40 44
             remote_execution_pb2_grpc.add_ExecutionServicer_to_server(self, server)
    
    41 45
     
    
    42
    -    def add_instance(self, name, instance):
    
    43
    -        self._instances[name] = instance
    
    46
    +        self._is_instrumented = monitor
    
    47
    +
    
    48
    +        if self._is_instrumented:
    
    49
    +            self.__peers_by_instance = {}
    
    50
    +            self.__peers = {}
    
    51
    +
    
    52
    +    # --- Public API ---
    
    53
    +
    
    54
    +    def add_instance(self, instance_name, instance):
    
    55
    +        """Registers a new servicer instance.
    
    56
    +
    
    57
    +        Args:
    
    58
    +            instance_name (str): The new instance's name.
    
    59
    +            instance (ExecutionInstance): The new instance itself.
    
    60
    +        """
    
    61
    +        self._instances[instance_name] = instance
    
    62
    +
    
    63
    +        if self._is_instrumented:
    
    64
    +            self.__peers_by_instance[instance_name] = set()
    
    65
    +
    
    66
    +    def get_scheduler(self, instance_name):
    
    67
    +        """Retrieves a reference to the scheduler for an instance.
    
    68
    +
    
    69
    +        Args:
    
    70
    +            instance_name (str): The name of the instance to query.
    
    71
    +
    
    72
    +        Returns:
    
    73
    +            Scheduler: A reference to the scheduler for `instance_name`.
    
    74
    +
    
    75
    +        Raises:
    
    76
    +            InvalidArgumentError: If no instance named `instance_name` exists.
    
    77
    +        """
    
    78
    +        instance = self._get_instance(instance_name)
    
    79
    +
    
    80
    +        return instance.scheduler
    
    81
    +
    
    82
    +    # --- Public API: Servicer ---
    
    44 83
     
    
    45 84
         def Execute(self, request, context):
    
    85
    +        """Handles ExecuteRequest messages.
    
    86
    +
    
    87
    +        Args:
    
    88
    +            request (ExecuteRequest): The incoming RPC request.
    
    89
    +            context (grpc.ServicerContext): Context for the RPC call.
    
    90
    +        """
    
    46 91
             self.__logger.debug("Execute request from [%s]", context.peer())
    
    47 92
     
    
    93
    +        instance_name = request.instance_name
    
    94
    +        message_queue = queue.Queue()
    
    95
    +        peer = context.peer()
    
    96
    +
    
    48 97
             try:
    
    49
    -            message_queue = queue.Queue()
    
    50
    -            instance = self._get_instance(request.instance_name)
    
    98
    +            instance = self._get_instance(instance_name)
    
    51 99
                 operation = instance.execute(request.action_digest,
    
    52 100
                                              request.skip_cache_lookup,
    
    53 101
                                              message_queue)
    
    54 102
     
    
    55
    -            context.add_callback(partial(instance.unregister_message_client,
    
    56
    -                                         operation.name, message_queue))
    
    103
    +            context.add_callback(partial(self._rpc_termination_callback,
    
    104
    +                                         peer, instance_name, operation.name, message_queue))
    
    57 105
     
    
    58
    -            instanced_op_name = "{}/{}".format(request.instance_name,
    
    59
    -                                               operation.name)
    
    106
    +            if self._is_instrumented:
    
    107
    +                if peer not in self.__peers:
    
    108
    +                    self.__peers_by_instance[instance_name].add(peer)
    
    109
    +                    self.__peers[peer] = 1
    
    110
    +                else:
    
    111
    +                    self.__peers[peer] += 1
    
    112
    +
    
    113
    +            instanced_op_name = "{}/{}".format(instance_name, operation.name)
    
    60 114
     
    
    61 115
                 self.__logger.info("Operation name: [%s]", instanced_op_name)
    
    62 116
     
    
    ... ... @@ -86,23 +140,33 @@ class ExecutionService(remote_execution_pb2_grpc.ExecutionServicer):
    86 140
                 yield operations_pb2.Operation()
    
    87 141
     
    
    88 142
         def WaitExecution(self, request, context):
    
    89
    -        self.__logger.debug("WaitExecution request from [%s]", context.peer())
    
    143
    +        """Handles WaitExecutionRequest messages.
    
    90 144
     
    
    91
    -        try:
    
    92
    -            names = request.name.split("/")
    
    145
    +        Args:
    
    146
    +            request (WaitExecutionRequest): The incoming RPC request.
    
    147
    +            context (grpc.ServicerContext): Context for the RPC call.
    
    148
    +        """
    
    149
    +        self.__logger.debug("WaitExecution request from [%s]", context.peer())
    
    93 150
     
    
    94
    -            # Operation name should be in format:
    
    95
    -            # {instance/name}/{operation_id}
    
    96
    -            instance_name = ''.join(names[0:-1])
    
    151
    +        names = request.name.split('/')
    
    152
    +        instance_name = '/'.join(names[:-1])
    
    153
    +        operation_name = names[-1]
    
    154
    +        message_queue = queue.Queue()
    
    155
    +        peer = context.peer()
    
    97 156
     
    
    98
    -            message_queue = queue.Queue()
    
    99
    -            operation_name = names[-1]
    
    157
    +        try:
    
    100 158
                 instance = self._get_instance(instance_name)
    
    101 159
     
    
    102 160
                 instance.register_message_client(operation_name, message_queue)
    
    161
    +            context.add_callback(partial(self._rpc_termination_callback,
    
    162
    +                                         peer, instance_name, operation_name, message_queue))
    
    103 163
     
    
    104
    -            context.add_callback(partial(instance.unregister_message_client,
    
    105
    -                                         operation_name, message_queue))
    
    164
    +            if self._is_instrumented:
    
    165
    +                if peer not in self.__peers:
    
    166
    +                    self.__peers_by_instance[instance_name].add(peer)
    
    167
    +                    self.__peers[peer] = 1
    
    168
    +                else:
    
    169
    +                    self.__peers[peer] += 1
    
    106 170
     
    
    107 171
                 for operation in instance.stream_operation_updates(message_queue,
    
    108 172
                                                                    operation_name):
    
    ... ... @@ -123,6 +187,39 @@ class ExecutionService(remote_execution_pb2_grpc.ExecutionServicer):
    123 187
                 context.set_code(grpc.StatusCode.CANCELLED)
    
    124 188
                 yield operations_pb2.Operation()
    
    125 189
     
    
    190
    +    # --- Public API: Monitoring ---
    
    191
    +
    
    192
    +    @property
    
    193
    +    def is_instrumented(self):
    
    194
    +        return self._is_instrumented
    
    195
    +
    
    196
    +    def query_n_clients(self):
    
    197
    +        if self.__peers is not None:
    
    198
    +            return len(self.__peers)
    
    199
    +        return 0
    
    200
    +
    
    201
    +    def query_n_clients_for_instance(self, instance_name):
    
    202
    +        try:
    
    203
    +            if self.__peers_by_instance is not None:
    
    204
    +                return len(self.__peers_by_instance[instance_name])
    
    205
    +        except KeyError:
    
    206
    +            pass
    
    207
    +        return 0
    
    208
    +
    
    209
    +    # --- Private API ---
    
    210
    +
    
    211
    +    def _rpc_termination_callback(self, peer, instance_name, job_name, message_queue):
    
    212
    +        instance = self._get_instance(instance_name)
    
    213
    +
    
    214
    +        instance.unregister_message_client(job_name, message_queue)
    
    215
    +
    
    216
    +        if self._is_instrumented:
    
    217
    +            if self.__peers[peer] > 1:
    
    218
    +                self.__peers[peer] -= 1
    
    219
    +            else:
    
    220
    +                self.__peers_by_instance[instance_name].remove(peer)
    
    221
    +                del self.__peers[peer]
    
    222
    +
    
    126 223
         def _get_instance(self, name):
    
    127 224
             try:
    
    128 225
                 return self._instances[name]
    

  • buildgrid/server/instance.py
    ... ... @@ -15,12 +15,19 @@
    15 15
     
    
    16 16
     import asyncio
    
    17 17
     from concurrent import futures
    
    18
    +from datetime import datetime, timedelta
    
    18 19
     import logging
    
    20
    +import logging.handlers
    
    19 21
     import os
    
    20 22
     import signal
    
    23
    +import sys
    
    24
    +import time
    
    21 25
     
    
    22 26
     import grpc
    
    27
    +import janus
    
    23 28
     
    
    29
    +from buildgrid._enums import BotStatus, LogRecordLevel, MetricRecordDomain, MetricRecordType
    
    30
    +from buildgrid._protos.buildgrid.v2 import monitoring_pb2
    
    24 31
     from buildgrid.server.actioncache.service import ActionCacheService
    
    25 32
     from buildgrid.server.bots.service import BotsService
    
    26 33
     from buildgrid.server.cas.service import ByteStreamService, ContentAddressableStorageService
    
    ... ... @@ -28,6 +35,7 @@ from buildgrid.server.execution.service import ExecutionService
    28 35
     from buildgrid.server._monitoring import MonitoringBus, MonitoringOutputType, MonitoringOutputFormat
    
    29 36
     from buildgrid.server.operations.service import OperationsService
    
    30 37
     from buildgrid.server.referencestorage.service import ReferenceStorageService
    
    38
    +from buildgrid.settings import LOG_RECORD_FORMAT, MONITORING_PERIOD
    
    31 39
     
    
    32 40
     
    
    33 41
     class BuildGridServer:
    
    ... ... @@ -53,8 +61,20 @@ class BuildGridServer:
    53 61
             self.__grpc_server = grpc.server(self.__grpc_executor)
    
    54 62
     
    
    55 63
             self.__main_loop = asyncio.get_event_loop()
    
    64
    +
    
    56 65
             self.__monitoring_bus = None
    
    57 66
     
    
    67
    +        self.__logging_queue = janus.Queue(loop=self.__main_loop)
    
    68
    +        self.__logging_handler = logging.handlers.QueueHandler(self.__logging_queue.sync_q)
    
    69
    +        self.__logging_formatter = logging.Formatter(fmt=LOG_RECORD_FORMAT)
    
    70
    +        self.__print_log_records = True
    
    71
    +
    
    72
    +        self.__build_metadata_queues = None
    
    73
    +
    
    74
    +        self.__state_monitoring_task = None
    
    75
    +        self.__build_monitoring_tasks = None
    
    76
    +        self.__logging_task = None
    
    77
    +
    
    58 78
             self._execution_service = None
    
    59 79
             self._bots_service = None
    
    60 80
             self._operations_service = None
    
    ... ... @@ -63,6 +83,9 @@ class BuildGridServer:
    63 83
             self._cas_service = None
    
    64 84
             self._bytestream_service = None
    
    65 85
     
    
    86
    +        self._schedulers = {}
    
    87
    +        self._instances = set()
    
    88
    +
    
    66 89
             self._is_instrumented = monitor
    
    67 90
     
    
    68 91
             if self._is_instrumented:
    
    ... ... @@ -70,6 +93,19 @@ class BuildGridServer:
    70 93
                     self.__main_loop, endpoint_type=MonitoringOutputType.STDOUT,
    
    71 94
                     serialisation_format=MonitoringOutputFormat.JSON)
    
    72 95
     
    
    96
    +            self.__build_monitoring_tasks = []
    
    97
    +
    
    98
    +        # Setup the main logging handler:
    
    99
    +        root_logger = logging.getLogger()
    
    100
    +
    
    101
    +        for log_filter in root_logger.filters[:]:
    
    102
    +            self.__logging_handler.addFilter(log_filter)
    
    103
    +            root_logger.removeFilter(log_filter)
    
    104
    +
    
    105
    +        for log_handler in root_logger.handlers[:]:
    
    106
    +            root_logger.removeHandler(log_handler)
    
    107
    +        root_logger.addHandler(self.__logging_handler)
    
    108
    +
    
    73 109
         # --- Public API ---
    
    74 110
     
    
    75 111
         def start(self):
    
    ... ... @@ -79,6 +115,25 @@ class BuildGridServer:
    79 115
             if self._is_instrumented:
    
    80 116
                 self.__monitoring_bus.start()
    
    81 117
     
    
    118
    +            self.__state_monitoring_task = asyncio.ensure_future(
    
    119
    +                self._state_monitoring_worker(period=MONITORING_PERIOD),
    
    120
    +                loop=self.__main_loop)
    
    121
    +
    
    122
    +            self.__build_monitoring_tasks.clear()
    
    123
    +            for instance_name, scheduler in self._schedulers.items():
    
    124
    +                if not scheduler.is_instrumented:
    
    125
    +                    continue
    
    126
    +
    
    127
    +                message_queue = janus.Queue(loop=self.__main_loop)
    
    128
    +                scheduler.register_build_metadata_watcher(message_queue.sync_q)
    
    129
    +
    
    130
    +                self.__build_monitoring_tasks.append(asyncio.ensure_future(
    
    131
    +                    self._build_monitoring_worker(instance_name, message_queue),
    
    132
    +                    loop=self.__main_loop))
    
    133
    +
    
    134
    +        self.__logging_task = asyncio.ensure_future(
    
    135
    +            self._logging_worker(), loop=self.__main_loop)
    
    136
    +
    
    82 137
             self.__main_loop.add_signal_handler(signal.SIGTERM, self.stop)
    
    83 138
     
    
    84 139
             self.__main_loop.run_forever()
    
    ... ... @@ -86,8 +141,18 @@ class BuildGridServer:
    86 141
         def stop(self):
    
    87 142
             """Stops the BuildGrid server."""
    
    88 143
             if self._is_instrumented:
    
    144
    +            if self.__state_monitoring_task is not None:
    
    145
    +                self.__state_monitoring_task.cancel()
    
    146
    +
    
    147
    +            for build_monitoring_task in self.__build_monitoring_tasks:
    
    148
    +                build_monitoring_task.cancel()
    
    149
    +            self.__build_monitoring_tasks.clear()
    
    150
    +
    
    89 151
                 self.__monitoring_bus.stop()
    
    90 152
     
    
    153
    +        if self.__logging_task is not None:
    
    154
    +            self.__logging_task.cancel()
    
    155
    +
    
    91 156
             self.__main_loop.stop()
    
    92 157
     
    
    93 158
             self.__grpc_server.stop(None)
    
    ... ... @@ -125,10 +190,14 @@ class BuildGridServer:
    125 190
                 instance_name (str): Instance name.
    
    126 191
             """
    
    127 192
             if self._execution_service is None:
    
    128
    -            self._execution_service = ExecutionService(self.__grpc_server)
    
    193
    +            self._execution_service = ExecutionService(
    
    194
    +                self.__grpc_server, monitor=self._is_instrumented)
    
    129 195
     
    
    130 196
             self._execution_service.add_instance(instance_name, instance)
    
    131 197
     
    
    198
    +        self._schedulers[instance_name] = instance.scheduler
    
    199
    +        self._instances.add(instance_name)
    
    200
    +
    
    132 201
         def add_bots_interface(self, instance, instance_name):
    
    133 202
             """Adds a :obj:`BotsInterface` to the service.
    
    134 203
     
    
    ... ... @@ -139,10 +208,13 @@ class BuildGridServer:
    139 208
                 instance_name (str): Instance name.
    
    140 209
             """
    
    141 210
             if self._bots_service is None:
    
    142
    -            self._bots_service = BotsService(self.__grpc_server)
    
    211
    +            self._bots_service = BotsService(
    
    212
    +                self.__grpc_server, monitor=self._is_instrumented)
    
    143 213
     
    
    144 214
             self._bots_service.add_instance(instance_name, instance)
    
    145 215
     
    
    216
    +        self._instances.add(instance_name)
    
    217
    +
    
    146 218
         def add_operations_instance(self, instance, instance_name):
    
    147 219
             """Adds an :obj:`OperationsInstance` to the service.
    
    148 220
     
    
    ... ... @@ -218,3 +290,254 @@ class BuildGridServer:
    218 290
         @property
    
    219 291
         def is_instrumented(self):
    
    220 292
             return self._is_instrumented
    
    293
    +
    
    294
    +    # --- Private API ---
    
    295
    +
    
    296
    +    async def _logging_worker(self):
    
    297
    +        """Publishes log records to the monitoring bus."""
    
    298
    +        async def __logging_worker():
    
    299
    +            log_record = await self.__logging_queue.async_q.get()
    
    300
    +
    
    301
    +            # Print log records to stdout, if required:
    
    302
    +            if self.__print_log_records:
    
    303
    +                record = self.__logging_formatter.format(log_record)
    
    304
    +
    
    305
    +                # TODO: Investigate if async write would be worth here.
    
    306
    +                sys.stdout.write('{}\n'.format(record))
    
    307
    +                sys.stdout.flush()
    
    308
    +
    
    309
    +            # Emit a log record if server is instrumented:
    
    310
    +            if self._is_instrumented:
    
    311
    +                log_record_level = LogRecordLevel(int(log_record.levelno / 10))
    
    312
    +                log_record_creation_time = datetime.fromtimestamp(log_record.created)
    
    313
    +                # logging.LogRecord.extra must be a str to str dict:
    
    314
    +                if 'extra' in log_record.__dict__ and log_record.extra:
    
    315
    +                    log_record_metadata = log_record.extra
    
    316
    +                else:
    
    317
    +                    log_record_metadata = None
    
    318
    +                record = self._forge_log_record(
    
    319
    +                    log_record.name, log_record_level, log_record.message,
    
    320
    +                    log_record_creation_time, metadata=log_record_metadata)
    
    321
    +
    
    322
    +                await self.__monitoring_bus.send_record(record)
    
    323
    +
    
    324
    +        try:
    
    325
    +            while True:
    
    326
    +                await __logging_worker()
    
    327
    +
    
    328
    +        except asyncio.CancelledError:
    
    329
    +            pass
    
    330
    +
    
    331
    +    def _forge_log_record(self, domain, level, message, creation_time, metadata=None):
    
    332
    +        log_record = monitoring_pb2.LogRecord()
    
    333
    +
    
    334
    +        log_record.creation_timestamp.FromDatetime(creation_time)
    
    335
    +        log_record.domain = domain
    
    336
    +        log_record.level = level.value
    
    337
    +        log_record.message = message
    
    338
    +        if metadata is not None:
    
    339
    +            log_record.metadata.update(metadata)
    
    340
    +
    
    341
    +        return log_record
    
    342
    +
    
    343
    +    async def _build_monitoring_worker(self, instance_name, message_queue):
    
    344
    +        """Publishes builds metadata to the monitoring bus."""
    
    345
    +        async def __build_monitoring_worker():
    
    346
    +            metadata = await message_queue.async_q.get()
    
    347
    +
    
    348
    +            # Emit build inputs fetching time record:
    
    349
    +            fetch_start = metadata.input_fetch_start_timestamp.ToDatetime()
    
    350
    +            fetch_completed = metadata.input_fetch_completed_timestamp.ToDatetime()
    
    351
    +            input_fetch_time = fetch_completed - fetch_start
    
    352
    +            timer_record = self._forge_timer_metric_record(
    
    353
    +                MetricRecordDomain.BUILD, 'inputs-fetching-time', input_fetch_time,
    
    354
    +                metadata={'instance-name': instance_name or 'void'})
    
    355
    +
    
    356
    +            await self.__monitoring_bus.send_record(timer_record)
    
    357
    +
    
    358
    +            # Emit build execution time record:
    
    359
    +            execution_start = metadata.execution_start_timestamp.ToDatetime()
    
    360
    +            execution_completed = metadata.execution_completed_timestamp.ToDatetime()
    
    361
    +            execution_time = execution_completed - execution_start
    
    362
    +            timer_record = self._forge_timer_metric_record(
    
    363
    +                MetricRecordDomain.BUILD, 'execution-time', execution_time,
    
    364
    +                metadata={'instance-name': instance_name or 'void'})
    
    365
    +
    
    366
    +            await self.__monitoring_bus.send_record(timer_record)
    
    367
    +
    
    368
    +            # Emit build outputs uploading time record:
    
    369
    +            upload_start = metadata.output_upload_start_timestamp.ToDatetime()
    
    370
    +            upload_completed = metadata.output_upload_completed_timestamp.ToDatetime()
    
    371
    +            output_upload_time = upload_completed - upload_start
    
    372
    +            timer_record = self._forge_timer_metric_record(
    
    373
    +                MetricRecordDomain.BUILD, 'outputs-uploading-time', output_upload_time,
    
    374
    +                metadata={'instance-name': instance_name or 'void'})
    
    375
    +
    
    376
    +            await self.__monitoring_bus.send_record(timer_record)
    
    377
    +
    
    378
    +            # Emit total build handling time record:
    
    379
    +            queued = metadata.queued_timestamp.ToDatetime()
    
    380
    +            worker_completed = metadata.worker_completed_timestamp.ToDatetime()
    
    381
    +            total_handling_time = worker_completed - queued
    
    382
    +            timer_record = self._forge_timer_metric_record(
    
    383
    +                MetricRecordDomain.BUILD, 'total-handling-time', total_handling_time,
    
    384
    +                metadata={'instance-name': instance_name or 'void'})
    
    385
    +
    
    386
    +            await self.__monitoring_bus.send_record(timer_record)
    
    387
    +
    
    388
    +        try:
    
    389
    +            while True:
    
    390
    +                await __build_monitoring_worker()
    
    391
    +
    
    392
    +        except asyncio.CancelledError:
    
    393
    +            pass
    
    394
    +
    
    395
    +    async def _state_monitoring_worker(self, period=1.0):
    
    396
    +        """Periodically publishes state metrics to the monitoring bus."""
    
    397
    +        async def __state_monitoring_worker():
    
    398
    +            # Emit total clients count record:
    
    399
    +            _, record = self._query_n_clients()
    
    400
    +            await self.__monitoring_bus.send_record(record)
    
    401
    +
    
    402
    +            # Emit total bots count record:
    
    403
    +            _, record = self._query_n_bots()
    
    404
    +            await self.__monitoring_bus.send_record(record)
    
    405
    +
    
    406
    +            queue_times = []
    
    407
    +            # Emits records by instance:
    
    408
    +            for instance_name in self._instances:
    
    409
    +                # Emit instance clients count record:
    
    410
    +                _, record = self._query_n_clients_for_instance(instance_name)
    
    411
    +                await self.__monitoring_bus.send_record(record)
    
    412
    +
    
    413
    +                # Emit instance bots count record:
    
    414
    +                _, record = self._query_n_bots_for_instance(instance_name)
    
    415
    +                await self.__monitoring_bus.send_record(record)
    
    416
    +
    
    417
    +                # Emit instance average queue time record:
    
    418
    +                queue_time, record = self._query_am_queue_time_for_instance(instance_name)
    
    419
    +                await self.__monitoring_bus.send_record(record)
    
    420
    +                if queue_time:
    
    421
    +                    queue_times.append(queue_time)
    
    422
    +
    
    423
    +            # Emits records by bot status:
    
    424
    +            for bot_status in [BotStatus.OK, BotStatus.UNHEALTHY]:
    
    425
    +                # Emit status bots count record:
    
    426
    +                _, record = self._query_n_bots_for_status(bot_status)
    
    427
    +                await self.__monitoring_bus.send_record(record)
    
    428
    +
    
    429
    +            # Emit overall average queue time record:
    
    430
    +            if queue_times:
    
    431
    +                am_queue_time = sum(queue_times, timedelta()) / len(queue_times)
    
    432
    +            else:
    
    433
    +                am_queue_time = timedelta()
    
    434
    +            record = self._forge_timer_metric_record(
    
    435
    +                MetricRecordDomain.STATE,
    
    436
    +                'average-queue-time',
    
    437
    +                am_queue_time)
    
    438
    +
    
    439
    +            await self.__monitoring_bus.send_record(record)
    
    440
    +
    
    441
    +        try:
    
    442
    +            while True:
    
    443
    +                start = time.time()
    
    444
    +                await __state_monitoring_worker()
    
    445
    +
    
    446
    +                end = time.time()
    
    447
    +                await asyncio.sleep(period - (end - start))
    
    448
    +
    
    449
    +        except asyncio.CancelledError:
    
    450
    +            pass
    
    451
    +
    
    452
    +    def _forge_counter_metric_record(self, domain, name, count, metadata=None):
    
    453
    +        counter_record = monitoring_pb2.MetricRecord()
    
    454
    +
    
    455
    +        counter_record.creation_timestamp.GetCurrentTime()
    
    456
    +        counter_record.domain = domain.value
    
    457
    +        counter_record.type = MetricRecordType.COUNTER.value
    
    458
    +        counter_record.name = name
    
    459
    +        counter_record.count = count
    
    460
    +        if metadata is not None:
    
    461
    +            counter_record.metadata.update(metadata)
    
    462
    +
    
    463
    +        return counter_record
    
    464
    +
    
    465
    +    def _forge_timer_metric_record(self, domain, name, duration, metadata=None):
    
    466
    +        timer_record = monitoring_pb2.MetricRecord()
    
    467
    +
    
    468
    +        timer_record.creation_timestamp.GetCurrentTime()
    
    469
    +        timer_record.domain = domain.value
    
    470
    +        timer_record.type = MetricRecordType.TIMER.value
    
    471
    +        timer_record.name = name
    
    472
    +        timer_record.duration.FromTimedelta(duration)
    
    473
    +        if metadata is not None:
    
    474
    +            timer_record.metadata.update(metadata)
    
    475
    +
    
    476
    +        return timer_record
    
    477
    +
    
    478
    +    def _forge_gauge_metric_record(self, domain, name, value, metadata=None):
    
    479
    +        gauge_record = monitoring_pb2.MetricRecord()
    
    480
    +
    
    481
    +        gauge_record.creation_timestamp.GetCurrentTime()
    
    482
    +        gauge_record.domain = domain.value
    
    483
    +        gauge_record.type = MetricRecordType.GAUGE.value
    
    484
    +        gauge_record.name = name
    
    485
    +        gauge_record.value = value
    
    486
    +        if metadata is not None:
    
    487
    +            gauge_record.metadata.update(metadata)
    
    488
    +
    
    489
    +        return gauge_record
    
    490
    +
    
    491
    +    # --- Private API: Monitoring ---
    
    492
    +
    
    493
    +    def _query_n_clients(self):
    
    494
    +        """Queries the number of clients connected."""
    
    495
    +        n_clients = self._execution_service.query_n_clients()
    
    496
    +        gauge_record = self._forge_gauge_metric_record(
    
    497
    +            MetricRecordDomain.STATE, 'clients-count', n_clients)
    
    498
    +
    
    499
    +        return n_clients, gauge_record
    
    500
    +
    
    501
    +    def _query_n_clients_for_instance(self, instance_name):
    
    502
    +        """Queries the number of clients connected for a given instance"""
    
    503
    +        n_clients = self._execution_service.query_n_clients_for_instance(instance_name)
    
    504
    +        gauge_record = self._forge_gauge_metric_record(
    
    505
    +            MetricRecordDomain.STATE, 'clients-count', n_clients,
    
    506
    +            metadata={'instance-name': instance_name or 'void'})
    
    507
    +
    
    508
    +        return n_clients, gauge_record
    
    509
    +
    
    510
    +    def _query_n_bots(self):
    
    511
    +        """Queries the number of bots connected."""
    
    512
    +        n_bots = self._bots_service.query_n_bots()
    
    513
    +        gauge_record = self._forge_gauge_metric_record(
    
    514
    +            MetricRecordDomain.STATE, 'bots-count', n_bots)
    
    515
    +
    
    516
    +        return n_bots, gauge_record
    
    517
    +
    
    518
    +    def _query_n_bots_for_instance(self, instance_name):
    
    519
    +        """Queries the number of bots connected for a given instance."""
    
    520
    +        n_bots = self._bots_service.query_n_bots_for_instance(instance_name)
    
    521
    +        gauge_record = self._forge_gauge_metric_record(
    
    522
    +            MetricRecordDomain.STATE, 'bots-count', n_bots,
    
    523
    +            metadata={'instance-name': instance_name or 'void'})
    
    524
    +
    
    525
    +        return n_bots, gauge_record
    
    526
    +
    
    527
    +    def _query_n_bots_for_status(self, bot_status):
    
    528
    +        """Queries the number of bots connected for a given health status."""
    
    529
    +        n_bots = self._bots_service.query_n_bots_for_status(bot_status)
    
    530
    +        gauge_record = self._forge_gauge_metric_record(
    
    531
    +            MetricRecordDomain.STATE, 'bots-count', n_bots,
    
    532
    +            metadata={'bot-status': bot_status.name})
    
    533
    +
    
    534
    +        return n_bots, gauge_record
    
    535
    +
    
    536
    +    def _query_am_queue_time_for_instance(self, instance_name):
    
    537
    +        """Queries the average job's queue time for a given instance."""
    
    538
    +        am_queue_time = self._schedulers[instance_name].query_am_queue_time()
    
    539
    +        timer_record = self._forge_timer_metric_record(
    
    540
    +            MetricRecordDomain.STATE, 'average-queue-time', am_queue_time,
    
    541
    +            metadata={'instance-name': instance_name or 'void'})
    
    542
    +
    
    543
    +        return am_queue_time, timer_record

  • buildgrid/server/job.py
    ... ... @@ -13,10 +13,11 @@
    13 13
     # limitations under the License.
    
    14 14
     
    
    15 15
     
    
    16
    +from datetime import datetime
    
    16 17
     import logging
    
    17 18
     import uuid
    
    18 19
     
    
    19
    -from google.protobuf import timestamp_pb2
    
    20
    +from google.protobuf import duration_pb2, timestamp_pb2
    
    20 21
     
    
    21 22
     from buildgrid._enums import LeaseState, OperationStage
    
    22 23
     from buildgrid._exceptions import CancelledError
    
    ... ... @@ -40,6 +41,7 @@ class Job:
    40 41
             self.__operation_metadata = remote_execution_pb2.ExecuteOperationMetadata()
    
    41 42
     
    
    42 43
             self.__queued_timestamp = timestamp_pb2.Timestamp()
    
    44
    +        self.__queued_time_duration = duration_pb2.Duration()
    
    43 45
             self.__worker_start_timestamp = timestamp_pb2.Timestamp()
    
    44 46
             self.__worker_completed_timestamp = timestamp_pb2.Timestamp()
    
    45 47
     
    
    ... ... @@ -56,6 +58,8 @@ class Job:
    56 58
             self._operation.done = False
    
    57 59
             self._n_tries = 0
    
    58 60
     
    
    61
    +    # --- Public API ---
    
    62
    +
    
    59 63
         @property
    
    60 64
         def name(self):
    
    61 65
             return self._name
    
    ... ... @@ -79,6 +83,13 @@ class Job:
    79 83
             else:
    
    80 84
                 return None
    
    81 85
     
    
    86
    +    @property
    
    87
    +    def holds_cached_action_result(self):
    
    88
    +        if self.__execute_response is not None:
    
    89
    +            return self.__execute_response.cached_result
    
    90
    +        else:
    
    91
    +            return False
    
    92
    +
    
    82 93
         @property
    
    83 94
         def operation(self):
    
    84 95
             return self._operation
    
    ... ... @@ -193,7 +204,7 @@ class Job:
    193 204
                     result.Unpack(action_result)
    
    194 205
     
    
    195 206
                 action_metadata = action_result.execution_metadata
    
    196
    -            action_metadata.queued_timestamp.CopyFrom(self.__worker_start_timestamp)
    
    207
    +            action_metadata.queued_timestamp.CopyFrom(self.__queued_timestamp)
    
    197 208
                 action_metadata.worker_start_timestamp.CopyFrom(self.__worker_start_timestamp)
    
    198 209
                 action_metadata.worker_completed_timestamp.CopyFrom(self.__worker_completed_timestamp)
    
    199 210
     
    
    ... ... @@ -227,6 +238,10 @@ class Job:
    227 238
                     self.__queued_timestamp.GetCurrentTime()
    
    228 239
                 self._n_tries += 1
    
    229 240
     
    
    241
    +        elif self.__operation_metadata.stage == OperationStage.EXECUTING.value:
    
    242
    +            queue_in, queue_out = self.__queued_timestamp.ToDatetime(), datetime.now()
    
    243
    +            self.__queued_time_duration.FromTimedelta(queue_out - queue_in)
    
    244
    +
    
    230 245
             elif self.__operation_metadata.stage == OperationStage.COMPLETED.value:
    
    231 246
                 if self.__execute_response is not None:
    
    232 247
                     self._operation.response.Pack(self.__execute_response)
    
    ... ... @@ -260,3 +275,11 @@ class Job:
    260 275
             self.__execute_response.status.message = "Operation cancelled by client."
    
    261 276
     
    
    262 277
             self.update_operation_stage(OperationStage.COMPLETED)
    
    278
    +
    
    279
    +    # --- Public API: Monitoring ---
    
    280
    +
    
    281
    +    def query_queue_time(self):
    
    282
    +        return self.__queued_time_duration.ToTimedelta()
    
    283
    +
    
    284
    +    def query_n_retries(self):
    
    285
    +        return self._n_tries - 1 if self._n_tries > 0 else 0

  • buildgrid/server/operations/instance.py
    ... ... @@ -32,6 +32,10 @@ class OperationsInstance:
    32 32
     
    
    33 33
             self._scheduler = scheduler
    
    34 34
     
    
    35
    +    @property
    
    36
    +    def scheduler(self):
    
    37
    +        return self._scheduler
    
    38
    +
    
    35 39
         def register_instance_with_server(self, instance_name, server):
    
    36 40
             server.add_operations_instance(self, instance_name)
    
    37 41
     
    

  • buildgrid/server/operations/service.py
    ... ... @@ -38,8 +38,18 @@ class OperationsService(operations_pb2_grpc.OperationsServicer):
    38 38
     
    
    39 39
             operations_pb2_grpc.add_OperationsServicer_to_server(self, server)
    
    40 40
     
    
    41
    -    def add_instance(self, name, instance):
    
    42
    -        self._instances[name] = instance
    
    41
    +    # --- Public API ---
    
    42
    +
    
    43
    +    def add_instance(self, instance_name, instance):
    
    44
    +        """Registers a new servicer instance.
    
    45
    +
    
    46
    +        Args:
    
    47
    +            instance_name (str): The new instance's name.
    
    48
    +            instance (OperationsInstance): The new instance itself.
    
    49
    +        """
    
    50
    +        self._instances[instance_name] = instance
    
    51
    +
    
    52
    +    # --- Public API: Servicer ---
    
    43 53
     
    
    44 54
         def GetOperation(self, request, context):
    
    45 55
             self.__logger.debug("GetOperation request from [%s]", context.peer())
    
    ... ... @@ -127,6 +137,8 @@ class OperationsService(operations_pb2_grpc.OperationsServicer):
    127 137
     
    
    128 138
             return Empty()
    
    129 139
     
    
    140
    +    # --- Private API ---
    
    141
    +
    
    130 142
         def _parse_instance_name(self, name):
    
    131 143
             """ If the instance name is not blank, 'name' will have the form
    
    132 144
             {instance_name}/{operation_uuid}. Otherwise, it will just be
    

  • buildgrid/server/scheduler.py
    ... ... @@ -20,33 +20,74 @@ Schedules jobs.
    20 20
     """
    
    21 21
     
    
    22 22
     from collections import deque
    
    23
    +from datetime import timedelta
    
    23 24
     import logging
    
    24 25
     
    
    26
    +from buildgrid._enums import LeaseState, OperationStage
    
    25 27
     from buildgrid._exceptions import NotFoundError
    
    26 28
     
    
    27
    -from .job import OperationStage, LeaseState
    
    28
    -
    
    29 29
     
    
    30 30
     class Scheduler:
    
    31 31
     
    
    32 32
         MAX_N_TRIES = 5
    
    33 33
     
    
    34
    -    def __init__(self, action_cache=None):
    
    34
    +    def __init__(self, action_cache=None, monitor=False):
    
    35 35
             self.__logger = logging.getLogger(__name__)
    
    36 36
     
    
    37
    +        self.__build_metadata_queues = None
    
    38
    +
    
    39
    +        self.__operations_by_stage = None
    
    40
    +        self.__leases_by_state = None
    
    41
    +        self.__queue_time_average = None
    
    42
    +        self.__retries_count = 0
    
    43
    +
    
    37 44
             self._action_cache = action_cache
    
    38 45
             self.jobs = {}
    
    39 46
             self.queue = deque()
    
    40 47
     
    
    48
    +        self._is_instrumented = monitor
    
    49
    +
    
    50
    +        if self._is_instrumented:
    
    51
    +            self.__build_metadata_queues = []
    
    52
    +
    
    53
    +            self.__operations_by_stage = {}
    
    54
    +            self.__leases_by_state = {}
    
    55
    +            self.__queue_time_average = 0, timedelta()
    
    56
    +
    
    57
    +            self.__operations_by_stage[OperationStage.CACHE_CHECK] = set()
    
    58
    +            self.__operations_by_stage[OperationStage.QUEUED] = set()
    
    59
    +            self.__operations_by_stage[OperationStage.EXECUTING] = set()
    
    60
    +            self.__operations_by_stage[OperationStage.COMPLETED] = set()
    
    61
    +
    
    62
    +            self.__leases_by_state[LeaseState.PENDING] = set()
    
    63
    +            self.__leases_by_state[LeaseState.ACTIVE] = set()
    
    64
    +            self.__leases_by_state[LeaseState.COMPLETED] = set()
    
    65
    +
    
    66
    +    # --- Public API ---
    
    67
    +
    
    41 68
         def register_client(self, job_name, queue):
    
    42
    -        self.jobs[job_name].register_client(queue)
    
    69
    +        job = self.jobs[job_name]
    
    70
    +
    
    71
    +        job.register_client(queue)
    
    43 72
     
    
    44 73
         def unregister_client(self, job_name, queue):
    
    45
    -        self.jobs[job_name].unregister_client(queue)
    
    74
    +        job = self.jobs[job_name]
    
    75
    +
    
    76
    +        job.unregister_client(queue)
    
    46 77
     
    
    47
    -        if not self.jobs[job_name].n_clients and self.jobs[job_name].operation.done:
    
    78
    +        if not job.n_clients and job.operation.done:
    
    48 79
                 del self.jobs[job_name]
    
    49 80
     
    
    81
    +            if self._is_instrumented:
    
    82
    +                self.__operations_by_stage[OperationStage.CACHE_CHECK].discard(job_name)
    
    83
    +                self.__operations_by_stage[OperationStage.QUEUED].discard(job_name)
    
    84
    +                self.__operations_by_stage[OperationStage.EXECUTING].discard(job_name)
    
    85
    +                self.__operations_by_stage[OperationStage.COMPLETED].discard(job_name)
    
    86
    +
    
    87
    +                self.__leases_by_state[LeaseState.PENDING].discard(job_name)
    
    88
    +                self.__leases_by_state[LeaseState.ACTIVE].discard(job_name)
    
    89
    +                self.__leases_by_state[LeaseState.COMPLETED].discard(job_name)
    
    90
    +
    
    50 91
         def queue_job(self, job, skip_cache_lookup=False):
    
    51 92
             self.jobs[job.name] = job
    
    52 93
     
    
    ... ... @@ -62,23 +103,30 @@ class Scheduler:
    62 103
                     job.set_cached_result(action_result)
    
    63 104
                     operation_stage = OperationStage.COMPLETED
    
    64 105
     
    
    106
    +                if self._is_instrumented:
    
    107
    +                    self.__retries_count += 1
    
    108
    +
    
    65 109
             else:
    
    66 110
                 operation_stage = OperationStage.QUEUED
    
    67 111
                 self.queue.append(job)
    
    68 112
     
    
    69
    -        job.update_operation_stage(operation_stage)
    
    113
    +        self._update_job_operation_stage(job.name, operation_stage)
    
    70 114
     
    
    71 115
         def retry_job(self, job_name):
    
    72
    -        if job_name in self.jobs:
    
    73
    -            job = self.jobs[job_name]
    
    74
    -            if job.n_tries >= self.MAX_N_TRIES:
    
    75
    -                # TODO: Decide what to do with these jobs
    
    76
    -                job.update_operation_stage(OperationStage.COMPLETED)
    
    77
    -                # TODO: Mark these jobs as done
    
    78
    -            else:
    
    79
    -                job.update_operation_stage(OperationStage.QUEUED)
    
    80
    -                job.update_lease_state(LeaseState.PENDING)
    
    81
    -                self.queue.append(job)
    
    116
    +        job = self.jobs[job_name]
    
    117
    +
    
    118
    +        operation_stage = None
    
    119
    +        if job.n_tries >= self.MAX_N_TRIES:
    
    120
    +            # TODO: Decide what to do with these jobs
    
    121
    +            operation_stage = OperationStage.COMPLETED
    
    122
    +            # TODO: Mark these jobs as done
    
    123
    +
    
    124
    +        else:
    
    125
    +            operation_stage = OperationStage.QUEUED
    
    126
    +            job.update_lease_state(LeaseState.PENDING)
    
    127
    +            self.queue.append(job)
    
    128
    +
    
    129
    +        self._update_job_operation_stage(job_name, operation_stage)
    
    82 130
     
    
    83 131
         def list_jobs(self):
    
    84 132
             return self.jobs.values()
    
    ... ... @@ -118,17 +166,27 @@ class Scheduler:
    118 166
                 lease_result (google.protobuf.Any): the lease execution result, only
    
    119 167
                     required if `lease_state` is `COMPLETED`.
    
    120 168
             """
    
    121
    -
    
    122 169
             job = self.jobs[lease.id]
    
    123 170
             lease_state = LeaseState(lease.state)
    
    124 171
     
    
    172
    +        operation_stage = None
    
    125 173
             if lease_state == LeaseState.PENDING:
    
    126 174
                 job.update_lease_state(LeaseState.PENDING)
    
    127
    -            job.update_operation_stage(OperationStage.QUEUED)
    
    175
    +            operation_stage = OperationStage.QUEUED
    
    176
    +
    
    177
    +            if self._is_instrumented:
    
    178
    +                self.__leases_by_state[LeaseState.PENDING].add(lease.id)
    
    179
    +                self.__leases_by_state[LeaseState.ACTIVE].discard(lease.id)
    
    180
    +                self.__leases_by_state[LeaseState.COMPLETED].discard(lease.id)
    
    128 181
     
    
    129 182
             elif lease_state == LeaseState.ACTIVE:
    
    130 183
                 job.update_lease_state(LeaseState.ACTIVE)
    
    131
    -            job.update_operation_stage(OperationStage.EXECUTING)
    
    184
    +            operation_stage = OperationStage.EXECUTING
    
    185
    +
    
    186
    +            if self._is_instrumented:
    
    187
    +                self.__leases_by_state[LeaseState.PENDING].discard(lease.id)
    
    188
    +                self.__leases_by_state[LeaseState.ACTIVE].add(lease.id)
    
    189
    +                self.__leases_by_state[LeaseState.COMPLETED].discard(lease.id)
    
    132 190
     
    
    133 191
             elif lease_state == LeaseState.COMPLETED:
    
    134 192
                 job.update_lease_state(LeaseState.COMPLETED,
    
    ... ... @@ -137,7 +195,14 @@ class Scheduler:
    137 195
                 if self._action_cache is not None and not job.do_not_cache:
    
    138 196
                     self._action_cache.update_action_result(job.action_digest, job.action_result)
    
    139 197
     
    
    140
    -            job.update_operation_stage(OperationStage.COMPLETED)
    
    198
    +            operation_stage = OperationStage.COMPLETED
    
    199
    +
    
    200
    +            if self._is_instrumented:
    
    201
    +                self.__leases_by_state[LeaseState.PENDING].discard(lease.id)
    
    202
    +                self.__leases_by_state[LeaseState.ACTIVE].discard(lease.id)
    
    203
    +                self.__leases_by_state[LeaseState.COMPLETED].add(lease.id)
    
    204
    +
    
    205
    +        self._update_job_operation_stage(lease.id, operation_stage)
    
    141 206
     
    
    142 207
         def get_job_lease(self, job_name):
    
    143 208
             """Returns the lease associated to job, if any have been emitted yet."""
    
    ... ... @@ -160,3 +225,109 @@ class Scheduler:
    160 225
                 job_name (str): name of the job holding the operation to cancel.
    
    161 226
             """
    
    162 227
             self.jobs[job_name].cancel_operation()
    
    228
    +
    
    229
    +    # --- Public API: Monitoring ---
    
    230
    +
    
    231
    +    @property
    
    232
    +    def is_instrumented(self):
    
    233
    +        return self._is_instrumented
    
    234
    +
    
    235
    +    def register_build_metadata_watcher(self, message_queue):
    
    236
    +        if self.__build_metadata_queues is not None:
    
    237
    +            self.__build_metadata_queues.append(message_queue)
    
    238
    +
    
    239
    +    def query_n_jobs(self):
    
    240
    +        return len(self.jobs)
    
    241
    +
    
    242
    +    def query_n_operations(self):
    
    243
    +        # For now n_operations == n_jobs:
    
    244
    +        return len(self.jobs)
    
    245
    +
    
    246
    +    def query_n_operations_by_stage(self, operation_stage):
    
    247
    +        try:
    
    248
    +            if self.__operations_by_stage is not None:
    
    249
    +                return len(self.__operations_by_stage[operation_stage])
    
    250
    +        except KeyError:
    
    251
    +            pass
    
    252
    +        return 0
    
    253
    +
    
    254
    +    def query_n_leases(self):
    
    255
    +        return len(self.jobs)
    
    256
    +
    
    257
    +    def query_n_leases_by_state(self, lease_state):
    
    258
    +        try:
    
    259
    +            if self.__leases_by_state is not None:
    
    260
    +                return len(self.__leases_by_state[lease_state])
    
    261
    +        except KeyError:
    
    262
    +            pass
    
    263
    +        return 0
    
    264
    +
    
    265
    +    def query_n_retries(self):
    
    266
    +        return self.__retries_count
    
    267
    +
    
    268
    +    def query_am_queue_time(self):
    
    269
    +        if self.__queue_time_average is not None:
    
    270
    +            return self.__queue_time_average[1]
    
    271
    +        return timedelta()
    
    272
    +
    
    273
    +    # --- Private API ---
    
    274
    +
    
    275
    +    def _update_job_operation_stage(self, job_name, operation_stage):
    
    276
    +    """Requests a stage transition for the job's :class:`Operation`.
    
    277
    +
    
    278
    +        Args:
    
    279
    +            job_name (str): name of the job to update.
    
    280
    +            operation_stage (OperationStage): the stage to transition to.
    
    281
    +        """
    
    282
    +        job = self.jobs[job_name]
    
    283
    +
    
    284
    +        if operation_stage == OperationStage.CACHE_CHECK:
    
    285
    +            job.update_operation_stage(OperationStage.CACHE_CHECK)
    
    286
    +
    
    287
    +            if self._is_instrumented:
    
    288
    +                self.__operations_by_stage[OperationStage.CACHE_CHECK].add(job_name)
    
    289
    +                self.__operations_by_stage[OperationStage.QUEUED].discard(job_name)
    
    290
    +                self.__operations_by_stage[OperationStage.EXECUTING].discard(job_name)
    
    291
    +                self.__operations_by_stage[OperationStage.COMPLETED].discard(job_name)
    
    292
    +
    
    293
    +        elif operation_stage == OperationStage.QUEUED:
    
    294
    +            job.update_operation_stage(OperationStage.QUEUED)
    
    295
    +
    
    296
    +            if self._is_instrumented:
    
    297
    +                self.__operations_by_stage[OperationStage.CACHE_CHECK].discard(job_name)
    
    298
    +                self.__operations_by_stage[OperationStage.QUEUED].add(job_name)
    
    299
    +                self.__operations_by_stage[OperationStage.EXECUTING].discard(job_name)
    
    300
    +                self.__operations_by_stage[OperationStage.COMPLETED].discard(job_name)
    
    301
    +
    
    302
    +        elif operation_stage == OperationStage.EXECUTING:
    
    303
    +            job.update_operation_stage(OperationStage.EXECUTING)
    
    304
    +
    
    305
    +            if self._is_instrumented:
    
    306
    +                self.__operations_by_stage[OperationStage.CACHE_CHECK].discard(job_name)
    
    307
    +                self.__operations_by_stage[OperationStage.QUEUED].discard(job_name)
    
    308
    +                self.__operations_by_stage[OperationStage.EXECUTING].add(job_name)
    
    309
    +                self.__operations_by_stage[OperationStage.COMPLETED].discard(job_name)
    
    310
    +
    
    311
    +        elif operation_stage == OperationStage.COMPLETED:
    
    312
    +            job.update_operation_stage(OperationStage.COMPLETED)
    
    313
    +
    
    314
    +            if self._is_instrumented:
    
    315
    +                self.__operations_by_stage[OperationStage.CACHE_CHECK].discard(job_name)
    
    316
    +                self.__operations_by_stage[OperationStage.QUEUED].discard(job_name)
    
    317
    +                self.__operations_by_stage[OperationStage.EXECUTING].discard(job_name)
    
    318
    +                self.__operations_by_stage[OperationStage.COMPLETED].add(job_name)
    
    319
    +
    
    320
    +                average_order, average_time = self.__queue_time_average
    
    321
    +
    
    322
    +                average_order += 1
    
    323
    +                if average_order <= 1:
    
    324
    +                    average_time = job.query_queue_time()
    
    325
    +                else:
    
    326
    +                    queue_time = job.query_queue_time()
    
    327
    +                    average_time = average_time + ((queue_time - average_time) / average_order)
    
    328
    +
    
    329
    +                self.__queue_time_average = average_order, average_time
    
    330
    +
    
    331
    +                if not job.holds_cached_action_result:
    
    332
    +                    for message_queue in self.__build_metadata_queues:
    
    333
    +                        message_queue.put(job.action_result.execution_metadata)

  • buildgrid/settings.py
    1
    +# Copyright (C) 2018 Bloomberg LP
    
    2
    +#
    
    3
    +# Licensed under the Apache License, Version 2.0 (the "License");
    
    4
    +# you may not use this file except in compliance with the License.
    
    5
    +# You may obtain a copy of the License at
    
    6
    +#
    
    7
    +#  <http://www.apache.org/licenses/LICENSE-2.0>
    
    8
    +#
    
    9
    +# Unless required by applicable law or agreed to in writing, software
    
    10
    +# distributed under the License is distributed on an "AS IS" BASIS,
    
    11
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    
    12
    +# See the License for the specific language governing permissions and
    
    13
    +# limitations under the License.
    
    14
    +
    
    15
    +
    
    1 16
     import hashlib
    
    2 17
     
    
    3 18
     
    
    4
    -# The hash function that CAS uses
    
    19
    +# Hash function used for computing digests:
    
    5 20
     HASH = hashlib.sha256
    
    21
    +
    
    22
    +# Length in characters of a hexadecimal hash string returned by HASH:
    
    6 23
     HASH_LENGTH = HASH().digest_size * 2
    
    24
    +
    
    25
    +# Period, in seconds, for the monitoring cycle:
    
    26
    +MONITORING_PERIOD = 5.0
    
    27
    +
    
    28
    +# String format for log records:
    
    29
    +LOG_RECORD_FORMAT = '%(asctime)s:%(name)36.36s][%(levelname)5.5s]: %(message)s'
    
    30
    +# The different log record attributes are documented here:
    
    31
    +# https://docs.python.org/3/library/logging.html#logrecord-attributes

  • setup.py
    ... ... @@ -112,13 +112,15 @@ setup(
    112 112
         license="Apache License, Version 2.0",
    
    113 113
         description="A remote execution service",
    
    114 114
         packages=find_packages(),
    
    115
    +    python_requires='>= 3.5.3',  # janus requirement
    
    115 116
         install_requires=[
    
    116
    -        'protobuf',
    
    117
    -        'grpcio',
    
    118
    -        'Click',
    
    119
    -        'PyYAML',
    
    120 117
             'boto3 < 1.8.0',
    
    121 118
             'botocore < 1.11.0',
    
    119
    +        'click',
    
    120
    +        'grpcio',
    
    121
    +        'janus',
    
    122
    +        'protobuf',
    
    123
    +        'pyyaml',
    
    122 124
         ],
    
    123 125
         entry_points={
    
    124 126
             'console_scripts': [
    



  • [Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]