Martin Blanchard pushed to branch mablanch/144-jwt-authentication at BuildGrid / buildgrid
Commits:
-
db53ffbc
by Martin Blanchard at 2018-11-29T08:59:48Z
-
5ecfb7f8
by Martin Blanchard at 2018-11-29T08:59:48Z
-
df5b6a80
by Martin Blanchard at 2018-11-29T08:59:48Z
-
5e608d6b
by Martin Blanchard at 2018-11-29T08:59:48Z
-
c167a1d0
by Martin Blanchard at 2018-11-29T08:59:48Z
-
8fc6d17d
by Martin Blanchard at 2018-11-29T08:59:48Z
-
397f385b
by Martin Blanchard at 2018-11-29T08:59:48Z
-
dbbcdb50
by Martin Blanchard at 2018-11-29T08:59:48Z
-
50f3f63b
by Martin Blanchard at 2018-11-29T08:59:48Z
-
62870d0a
by Martin Blanchard at 2018-11-29T09:24:03Z
-
7513caf3
by Martin Blanchard at 2018-11-29T15:38:45Z
-
0cffd69a
by Martin Blanchard at 2018-11-29T15:38:49Z
-
37c9140e
by Martin Blanchard at 2018-11-29T15:38:54Z
-
1911cd93
by Martin Blanchard at 2018-11-29T15:38:54Z
-
c6b9dd15
by Martin Blanchard at 2018-11-29T15:38:54Z
-
8a900c4e
by Martin Blanchard at 2018-11-29T15:38:54Z
-
b6c12e78
by Martin Blanchard at 2018-11-29T15:38:54Z
18 changed files:
- .pylintrc
- buildgrid/_app/commands/cmd_capabilities.py
- buildgrid/_app/commands/cmd_cas.py
- buildgrid/_app/commands/cmd_execute.py
- buildgrid/_app/commands/cmd_operation.py
- + buildgrid/client/authentication.py
- + buildgrid/server/_authentication.py
- buildgrid/server/bots/instance.py
- buildgrid/server/bots/service.py
- buildgrid/server/execution/instance.py
- buildgrid/server/execution/service.py
- buildgrid/server/instance.py
- buildgrid/server/job.py
- buildgrid/server/operations/instance.py
- buildgrid/server/operations/service.py
- buildgrid/server/scheduler.py
- buildgrid/settings.py
- setup.py
Changes:
... | ... | @@ -185,6 +185,7 @@ ignore-on-opaque-inference=yes |
185 | 185 |
# for classes with dynamically set attributes). This supports the use of
|
186 | 186 |
# qualified names.
|
187 | 187 |
ignored-classes=google.protobuf.any_pb2.Any,
|
188 |
+ google.protobuf.duration_pb2.Duration,
|
|
188 | 189 |
google.protobuf.timestamp_pb2.Timestamp
|
189 | 190 |
|
190 | 191 |
# List of module names for which member attributes should not be checked
|
... | ... | @@ -460,6 +461,8 @@ known-third-party=boto3, |
460 | 461 |
enchant,
|
461 | 462 |
google,
|
462 | 463 |
grpc,
|
464 |
+ janus,
|
|
465 |
+ jwt,
|
|
463 | 466 |
moto,
|
464 | 467 |
yaml
|
465 | 468 |
|
... | ... | @@ -523,4 +526,4 @@ valid-metaclass-classmethod-first-arg=mcs |
523 | 526 |
|
524 | 527 |
# Exceptions that will emit a warning when being caught. Defaults to
|
525 | 528 |
# "Exception"
|
526 |
-overgeneral-exceptions=Exception
|
|
529 |
+overgeneral-exceptions=Exception
|
|
\ No newline at end of file |
... | ... | @@ -17,9 +17,12 @@ import sys |
17 | 17 |
from urllib.parse import urlparse
|
18 | 18 |
|
19 | 19 |
import click
|
20 |
+from google.protobuf import json_format
|
|
20 | 21 |
import grpc
|
21 | 22 |
|
23 |
+from buildgrid.client.authentication import setup_channel
|
|
22 | 24 |
from buildgrid.client.capabilities import CapabilitiesInterface
|
25 |
+from buildgrid._exceptions import InvalidArgumentError
|
|
23 | 26 |
|
24 | 27 |
from ..cli import pass_context
|
25 | 28 |
|
... | ... | @@ -27,32 +30,29 @@ from ..cli import pass_context |
27 | 30 |
@click.command(name='capabilities', short_help="Capabilities service.")
|
28 | 31 |
@click.option('--remote', type=click.STRING, default='http://localhost:50051', show_default=True,
|
29 | 32 |
help="Remote execution server's URL (port defaults to 50051 if no specified).")
|
33 |
+@click.option('--auth-token', type=click.Path(exists=True, dir_okay=False), default=None,
|
|
34 |
+ help="Authorization token for the remote.")
|
|
30 | 35 |
@click.option('--client-key', type=click.Path(exists=True, dir_okay=False), default=None,
|
31 |
- help="Private client key for TLS (PEM-encoded)")
|
|
36 |
+ help="Private client key for TLS (PEM-encoded).")
|
|
32 | 37 |
@click.option('--client-cert', type=click.Path(exists=True, dir_okay=False), default=None,
|
33 |
- help="Public client certificate for TLS (PEM-encoded)")
|
|
38 |
+ help="Public client certificate for TLS (PEM-encoded).")
|
|
34 | 39 |
@click.option('--server-cert', type=click.Path(exists=True, dir_okay=False), default=None,
|
35 |
- help="Public server certificate for TLS (PEM-encoded)")
|
|
40 |
+ help="Public server certificate for TLS (PEM-encoded).")
|
|
36 | 41 |
@click.option('--instance-name', type=click.STRING, default='main', show_default=True,
|
37 | 42 |
help="Targeted farm instance name.")
|
38 | 43 |
@pass_context
|
39 |
-def cli(context, remote, instance_name, client_key, client_cert, server_cert):
|
|
40 |
- click.echo("Getting capabilities...")
|
|
41 |
- url = urlparse(remote)
|
|
42 |
- |
|
43 |
- remote = '{}:{}'.format(url.hostname, url.port or 50051)
|
|
44 |
- instance_name = instance_name
|
|
45 |
- |
|
46 |
- if url.scheme == 'http':
|
|
47 |
- channel = grpc.insecure_channel(remote)
|
|
48 |
- else:
|
|
49 |
- credentials = context.load_client_credentials(client_key, client_cert, server_cert)
|
|
50 |
- if not credentials:
|
|
51 |
- click.echo("ERROR: no TLS keys were specified and no defaults could be found.", err=True)
|
|
52 |
- sys.exit(-1)
|
|
53 |
- |
|
54 |
- channel = grpc.secure_channel(remote, credentials)
|
|
55 |
- |
|
56 |
- interface = CapabilitiesInterface(channel)
|
|
57 |
- response = interface.get_capabilities(instance_name)
|
|
58 |
- click.echo(response)
|
|
44 |
+def cli(context, remote, instance_name, auth_token, client_key, client_cert, server_cert):
|
|
45 |
+ """Entry point for the bgd-capabilities CLI command group."""
|
|
46 |
+ try:
|
|
47 |
+ context.channel = setup_channel(remote, authorization_token=auth_token,
|
|
48 |
+ client_key=client_key, client_cert=client_cert, server_cert=server_cert)
|
|
49 |
+ |
|
50 |
+ except InvalidArgumentError as e:
|
|
51 |
+ click.echo("Error: {}.".format(e), err=True)
|
|
52 |
+ |
|
53 |
+ context.instance_name = instance_name
|
|
54 |
+ |
|
55 |
+ interface = CapabilitiesInterface(context.channel)
|
|
56 |
+ response = interface.get_capabilities(context.instance_name)
|
|
57 |
+ |
|
58 |
+ click.echo(json_format.MessageToJson(response))
|
... | ... | @@ -27,7 +27,9 @@ from urllib.parse import urlparse |
27 | 27 |
import click
|
28 | 28 |
import grpc
|
29 | 29 |
|
30 |
+from buildgrid.client.authentication import setup_channel
|
|
30 | 31 |
from buildgrid.client.cas import download, upload
|
32 |
+from buildgrid._exceptions import InvalidArgumentError
|
|
31 | 33 |
from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
|
32 | 34 |
from buildgrid.utils import create_digest, merkle_tree_maker, read_file
|
33 | 35 |
|
... | ... | @@ -37,32 +39,27 @@ from ..cli import pass_context |
37 | 39 |
@click.group(name='cas', short_help="Interact with the CAS server.")
|
38 | 40 |
@click.option('--remote', type=click.STRING, default='http://localhost:50051', show_default=True,
|
39 | 41 |
help="Remote execution server's URL (port defaults to 50051 if no specified).")
|
42 |
+@click.option('--auth-token', type=click.Path(exists=True, dir_okay=False), default=None,
|
|
43 |
+ help="Authorization token for the remote.")
|
|
40 | 44 |
@click.option('--client-key', type=click.Path(exists=True, dir_okay=False), default=None,
|
41 |
- help="Private client key for TLS (PEM-encoded)")
|
|
45 |
+ help="Private client key for TLS (PEM-encoded).")
|
|
42 | 46 |
@click.option('--client-cert', type=click.Path(exists=True, dir_okay=False), default=None,
|
43 |
- help="Public client certificate for TLS (PEM-encoded)")
|
|
47 |
+ help="Public client certificate for TLS (PEM-encoded).")
|
|
44 | 48 |
@click.option('--server-cert', type=click.Path(exists=True, dir_okay=False), default=None,
|
45 | 49 |
help="Public server certificate for TLS (PEM-encoded)")
|
46 | 50 |
@click.option('--instance-name', type=click.STRING, default='main', show_default=True,
|
47 | 51 |
help="Targeted farm instance name.")
|
48 | 52 |
@pass_context
|
49 |
-def cli(context, remote, instance_name, client_key, client_cert, server_cert):
|
|
50 |
- url = urlparse(remote)
|
|
53 |
+def cli(context, remote, instance_name, auth_token, client_key, client_cert, server_cert):
|
|
54 |
+ """Entry point for the bgd-cas CLI command group."""
|
|
55 |
+ try:
|
|
56 |
+ context.channel = setup_channel(remote, authorization_token=auth_token,
|
|
57 |
+ client_key=client_key, client_cert=client_cert, server_cert=server_cert)
|
|
51 | 58 |
|
52 |
- context.remote = '{}:{}'.format(url.hostname, url.port or 50051)
|
|
53 |
- context.instance_name = instance_name
|
|
54 |
- |
|
55 |
- if url.scheme == 'http':
|
|
56 |
- context.channel = grpc.insecure_channel(context.remote)
|
|
57 |
- else:
|
|
58 |
- credentials = context.load_client_credentials(client_key, client_cert, server_cert)
|
|
59 |
- if not credentials:
|
|
60 |
- click.echo("ERROR: no TLS keys were specified and no defaults could be found.", err=True)
|
|
61 |
- sys.exit(-1)
|
|
59 |
+ except InvalidArgumentError as e:
|
|
60 |
+ click.echo("Error: {}.".format(e), err=True)
|
|
62 | 61 |
|
63 |
- context.channel = grpc.secure_channel(context.remote, credentials)
|
|
64 |
- |
|
65 |
- click.echo("Starting for remote=[{}]".format(context.remote))
|
|
62 |
+ context.instance_name = instance_name
|
|
66 | 63 |
|
67 | 64 |
|
68 | 65 |
@cli.command('upload-dummy', short_help="Upload a dummy action. Should be used with `execute dummy-request`")
|
... | ... | @@ -28,7 +28,9 @@ from urllib.parse import urlparse |
28 | 28 |
import click
|
29 | 29 |
import grpc
|
30 | 30 |
|
31 |
+from buildgrid.client.authentication import setup_channel
|
|
31 | 32 |
from buildgrid.client.cas import download, upload
|
33 |
+from buildgrid._exceptions import InvalidArgumentError
|
|
32 | 34 |
from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
|
33 | 35 |
from buildgrid.utils import create_digest
|
34 | 36 |
|
... | ... | @@ -38,32 +40,27 @@ from ..cli import pass_context |
38 | 40 |
@click.group(name='execute', short_help="Execute simple operations.")
|
39 | 41 |
@click.option('--remote', type=click.STRING, default='http://localhost:50051', show_default=True,
|
40 | 42 |
help="Remote execution server's URL (port defaults to 50051 if no specified).")
|
43 |
+@click.option('--auth-token', type=click.Path(exists=True, dir_okay=False), default=None,
|
|
44 |
+ help="Authorization token for the remote.")
|
|
41 | 45 |
@click.option('--client-key', type=click.Path(exists=True, dir_okay=False), default=None,
|
42 |
- help="Private client key for TLS (PEM-encoded)")
|
|
46 |
+ help="Private client key for TLS (PEM-encoded).")
|
|
43 | 47 |
@click.option('--client-cert', type=click.Path(exists=True, dir_okay=False), default=None,
|
44 |
- help="Public client certificate for TLS (PEM-encoded)")
|
|
48 |
+ help="Public client certificate for TLS (PEM-encoded).")
|
|
45 | 49 |
@click.option('--server-cert', type=click.Path(exists=True, dir_okay=False), default=None,
|
46 |
- help="Public server certificate for TLS (PEM-encoded)")
|
|
50 |
+ help="Public server certificate for TLS (PEM-encoded).")
|
|
47 | 51 |
@click.option('--instance-name', type=click.STRING, default='main', show_default=True,
|
48 | 52 |
help="Targeted farm instance name.")
|
49 | 53 |
@pass_context
|
50 | 54 |
def cli(context, remote, instance_name, auth_token, client_key, client_cert, server_cert):
|
51 |
- url = urlparse(remote)
|
|
55 |
+ """Entry point for the bgd-execute CLI command group."""
|
|
56 |
+ try:
|
|
57 |
+ context.channel = setup_channel(remote, authorization_token=auth_token,
|
|
58 |
+ client_key=client_key, client_cert=client_cert, server_cert=server_cert)
|
|
52 | 59 |
|
53 |
- context.remote = '{}:{}'.format(url.hostname, url.port or 50051)
|
|
54 |
- context.instance_name = instance_name
|
|
55 |
- |
|
56 |
- if url.scheme == 'http':
|
|
57 |
- context.channel = grpc.insecure_channel(context.remote)
|
|
58 |
- else:
|
|
59 |
- credentials = context.load_client_credentials(client_key, client_cert, server_cert)
|
|
60 |
- if not credentials:
|
|
61 |
- click.echo("ERROR: no TLS keys were specified and no defaults could be found.", err=True)
|
|
62 |
- sys.exit(-1)
|
|
60 |
+ except InvalidArgumentError as e:
|
|
61 |
+ click.echo("Error: {}.".format(e), err=True)
|
|
63 | 62 |
|
64 |
- context.channel = grpc.secure_channel(context.remote, credentials)
|
|
65 |
- |
|
66 |
- click.echo("Starting for remote=[{}]".format(context.remote))
|
|
63 |
+ context.instance_name = instance_name
|
|
67 | 64 |
|
68 | 65 |
|
69 | 66 |
@cli.command('request-dummy', short_help="Send a dummy action.")
|
... | ... | @@ -30,7 +30,9 @@ import click |
30 | 30 |
from google.protobuf import json_format
|
31 | 31 |
import grpc
|
32 | 32 |
|
33 |
+from buildgrid.client.authentication import setup_channel
|
|
33 | 34 |
from buildgrid._enums import OperationStage
|
35 |
+from buildgrid._exceptions import InvalidArgumentError
|
|
34 | 36 |
from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
|
35 | 37 |
from buildgrid._protos.google.longrunning import operations_pb2, operations_pb2_grpc
|
36 | 38 |
from buildgrid._protos.google.rpc import code_pb2
|
... | ... | @@ -41,32 +43,27 @@ from ..cli import pass_context |
41 | 43 |
@click.group(name='operation', short_help="Long running operations commands.")
|
42 | 44 |
@click.option('--remote', type=click.STRING, default='http://localhost:50051', show_default=True,
|
43 | 45 |
help="Remote execution server's URL (port defaults to 50051 if no specified).")
|
46 |
+@click.option('--auth-token', type=click.Path(exists=True, dir_okay=False), default=None,
|
|
47 |
+ help="Authorization token for the remote.")
|
|
44 | 48 |
@click.option('--client-key', type=click.Path(exists=True, dir_okay=False), default=None,
|
45 |
- help="Private client key for TLS (PEM-encoded)")
|
|
49 |
+ help="Private client key for TLS (PEM-encoded).")
|
|
46 | 50 |
@click.option('--client-cert', type=click.Path(exists=True, dir_okay=False), default=None,
|
47 |
- help="Public client certificate for TLS (PEM-encoded)")
|
|
51 |
+ help="Public client certificate for TLS (PEM-encoded).")
|
|
48 | 52 |
@click.option('--server-cert', type=click.Path(exists=True, dir_okay=False), default=None,
|
49 |
- help="Public server certificate for TLS (PEM-encoded)")
|
|
53 |
+ help="Public server certificate for TLS (PEM-encoded).")
|
|
50 | 54 |
@click.option('--instance-name', type=click.STRING, default='main', show_default=True,
|
51 | 55 |
help="Targeted farm instance name.")
|
52 | 56 |
@pass_context
|
53 | 57 |
def cli(context, remote, instance_name, auth_token, client_key, client_cert, server_cert):
|
54 |
- url = urlparse(remote)
|
|
58 |
+ """Entry point for the bgd-operation CLI command group."""
|
|
59 |
+ try:
|
|
60 |
+ context.channel = setup_channel(remote, authorization_token=auth_token,
|
|
61 |
+ client_key=client_key, client_cert=client_cert, server_cert=server_cert)
|
|
55 | 62 |
|
56 |
- context.remote = '{}:{}'.format(url.hostname, url.port or 50051)
|
|
57 |
- context.instance_name = instance_name
|
|
58 |
- |
|
59 |
- if url.scheme == 'http':
|
|
60 |
- context.channel = grpc.insecure_channel(context.remote)
|
|
61 |
- else:
|
|
62 |
- credentials = context.load_client_credentials(client_key, client_cert, server_cert)
|
|
63 |
- if not credentials:
|
|
64 |
- click.echo("ERROR: no TLS keys were specified and no defaults could be found.", err=True)
|
|
65 |
- sys.exit(-1)
|
|
63 |
+ except InvalidArgumentError as e:
|
|
64 |
+ click.echo("Error: {}.".format(e), err=True)
|
|
66 | 65 |
|
67 |
- context.channel = grpc.secure_channel(context.remote, credentials)
|
|
68 |
- |
|
69 |
- click.echo("Starting for remote=[{}]".format(context.remote))
|
|
66 |
+ context.instance_name = instance_name
|
|
70 | 67 |
|
71 | 68 |
|
72 | 69 |
def _print_operation_status(operation, print_details=False):
|
1 |
+# Copyright (C) 2018 Bloomberg LP
|
|
2 |
+#
|
|
3 |
+# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4 |
+# you may not use this file except in compliance with the License.
|
|
5 |
+# You may obtain a copy of the License at
|
|
6 |
+#
|
|
7 |
+# <http://www.apache.org/licenses/LICENSE-2.0>
|
|
8 |
+#
|
|
9 |
+# Unless required by applicable law or agreed to in writing, software
|
|
10 |
+# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11 |
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12 |
+# See the License for the specific language governing permissions and
|
|
13 |
+# limitations under the License.
|
|
14 |
+ |
|
15 |
+ |
|
16 |
+import base64
|
|
17 |
+from collections import namedtuple
|
|
18 |
+from urllib.parse import urlparse
|
|
19 |
+import os
|
|
20 |
+ |
|
21 |
+import grpc
|
|
22 |
+ |
|
23 |
+from buildgrid._exceptions import InvalidArgumentError
|
|
24 |
+from buildgrid.utils import read_file
|
|
25 |
+ |
|
26 |
+ |
|
27 |
+def load_tls_channel_credentials(client_key=None, client_cert=None, server_cert=None):
|
|
28 |
+ """Looks-up and loads TLS gRPC client channel credentials.
|
|
29 |
+ |
|
30 |
+ Args:
|
|
31 |
+ client_key(str, optional): Client private key file path.
|
|
32 |
+ client_cert(str, optional): Client certificate chain file path.
|
|
33 |
+ server_cert(str, optional): Server root certificate file path.
|
|
34 |
+ |
|
35 |
+ Returns:
|
|
36 |
+ ChannelCredentials: Credentials to be used for a TLS-encrypted gRPC
|
|
37 |
+ client channel.
|
|
38 |
+ """
|
|
39 |
+ if server_cert and os.path.exists(server_cert):
|
|
40 |
+ server_cert_pem = read_file(server_cert)
|
|
41 |
+ else:
|
|
42 |
+ server_cert_pem = None
|
|
43 |
+ |
|
44 |
+ if client_key and os.path.exists(client_key):
|
|
45 |
+ client_key_pem = read_file(client_key)
|
|
46 |
+ else:
|
|
47 |
+ client_key_pem = None
|
|
48 |
+ |
|
49 |
+ if client_key_pem and client_cert and os.path.exists(client_cert):
|
|
50 |
+ client_cert_pem = read_file(client_cert)
|
|
51 |
+ else:
|
|
52 |
+ client_cert_pem = None
|
|
53 |
+ |
|
54 |
+ credentials = grpc.ssl_channel_credentials(root_certificates=server_cert_pem,
|
|
55 |
+ private_key=client_key_pem,
|
|
56 |
+ certificate_chain=client_cert_pem)
|
|
57 |
+ return credentials
|
|
58 |
+ |
|
59 |
+ |
|
60 |
+def load_channel_authorization_token(authorization_token=None):
|
|
61 |
+ """Looks-up and loads client authorization token.
|
|
62 |
+ |
|
63 |
+ Args:
|
|
64 |
+ authorization_token(str, optional): Token file path.
|
|
65 |
+ |
|
66 |
+ Returns:
|
|
67 |
+ str: Encoded token string.
|
|
68 |
+ """
|
|
69 |
+ if authorization_token and os.path.exists(authorization_token):
|
|
70 |
+ return read_file(authorization_token)
|
|
71 |
+ |
|
72 |
+ #TODO: Try loading the token from a default location?
|
|
73 |
+ |
|
74 |
+ return None
|
|
75 |
+ |
|
76 |
+ |
|
77 |
+def setup_channel(remote_url, authorization_token=None,
|
|
78 |
+ client_key=None, client_cert=None, server_cert=None):
|
|
79 |
+ """Creates a new gRPC client communication chanel.
|
|
80 |
+ |
|
81 |
+ If `remote_url` does not specify a port number, it defaults to 50051.
|
|
82 |
+ |
|
83 |
+ Args:
|
|
84 |
+ remote_url (str): URL for the remote, including port and protocol.
|
|
85 |
+ authorization_token (str): Authorization token file path.
|
|
86 |
+ server_cert(str): TLS root certificate file path.
|
|
87 |
+ client_key(str): TLS private key file path.
|
|
88 |
+ client_cert(str): TLS certificate chain file path.
|
|
89 |
+ |
|
90 |
+ Returns:
|
|
91 |
+ Channel: A gRPC channel to the remote, wrapped with an authorization interceptor when a token is given.
|
|
92 |
+ |
|
93 |
+ Raises:
|
|
94 |
+ InvalidArgumentError: On any input parsing error.
|
|
95 |
+ """
|
|
96 |
+ url = urlparse(remote_url)
|
|
97 |
+ remote = '{}:{}'.format(url.hostname, url.port or 50051)
|
|
98 |
+ |
|
99 |
+ if url.scheme == 'http':
|
|
100 |
+ channel = grpc.insecure_channel(remote)
|
|
101 |
+ |
|
102 |
+ elif url.scheme == 'https':
|
|
103 |
+ credentials = load_tls_channel_credentials(client_key, client_cert, server_cert)
|
|
104 |
+ if not credentials:
|
|
105 |
+ raise InvalidArgumentError("Given TLS details (or defaults) could not be loaded")
|
|
106 |
+ |
|
107 |
+ channel = grpc.secure_channel(remote, credentials)
|
|
108 |
+ |
|
109 |
+ else:
|
|
110 |
+ raise InvalidArgumentError("Given remote does not specify a protocol")
|
|
111 |
+ |
|
112 |
+ if authorization_token is not None:
|
|
113 |
+ token = load_channel_authorization_token(authorization_token)
|
|
114 |
+ if not token:
|
|
115 |
+ raise InvalidArgumentError("Given authorization token could not be loaded")
|
|
116 |
+ |
|
117 |
+ interceptor = AuthMetadataClientInterceptor(token)
|
|
118 |
+ channel = grpc.intercept_channel(channel, interceptor)
|
|
119 |
+ |
|
120 |
+ return channel
|
|
121 |
+ |
|
122 |
+ |
|
123 |
+class AuthMetadataClientInterceptor(
|
|
124 |
+ grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor,
|
|
125 |
+ grpc.StreamUnaryClientInterceptor, grpc.StreamStreamClientInterceptor):
|
|
126 |
+ |
|
127 |
+ def __init__(self, authorization_token=None, authorization_secret=None):
|
|
128 |
+ """Initialises a new :class:`AuthMetadataClientInterceptor`.
|
|
129 |
+ |
|
130 |
+ Args:
|
|
131 |
+ authorization_token (str): Authorization token as a string.
|
|
132 |
+ """
|
|
133 |
+ if authorization_token:
|
|
134 |
+ self.__secret = authorization_token.strip()
|
|
135 |
+ else:
|
|
136 |
+ self.__secret = base64.b64encode(authorization_secret)
|
|
137 |
+ |
|
138 |
+ self.__header_field_name = 'authorization'
|
|
139 |
+ self.__header_field_value = 'Bearer {}'.format(self.__secret)
|
|
140 |
+ |
|
141 |
+ def intercept_unary_unary(self, continuation, client_call_details, request):
|
|
142 |
+ new_details = self._amend_call_details(client_call_details)
|
|
143 |
+ |
|
144 |
+ return continuation(new_details, request)
|
|
145 |
+ |
|
146 |
+ def intercept_unary_stream(self, continuation, client_call_details, request):
|
|
147 |
+ new_details = self._amend_call_details(client_call_details)
|
|
148 |
+ |
|
149 |
+ return continuation(new_details, request)
|
|
150 |
+ |
|
151 |
+ def intercept_stream_unary(self, continuation, client_call_details, request_iterator):
|
|
152 |
+ new_details = self._amend_call_details(client_call_details)
|
|
153 |
+ |
|
154 |
+ return continuation(new_details, request_iterator)
|
|
155 |
+ |
|
156 |
+ def intercept_stream_stream(self, continuation, client_call_details, request_iterator):
|
|
157 |
+ new_details = self._amend_call_details(client_call_details)
|
|
158 |
+ |
|
159 |
+ return continuation(new_details, request_iterator)
|
|
160 |
+ |
|
161 |
+ def _amend_call_details(self, client_call_details):
|
|
162 |
+ if client_call_details.metadata is not None:
|
|
163 |
+ new_metadata = list(client_call_details.metadata)
|
|
164 |
+ else:
|
|
165 |
+ new_metadata = []
|
|
166 |
+ |
|
167 |
+ new_metadata.append(
|
|
168 |
+ (self.__header_field_name, self.__header_field_value,))
|
|
169 |
+ |
|
170 |
+ class _ClientCallDetails(
|
|
171 |
+ namedtuple('_ClientCallDetails',
|
|
172 |
+ ('method', 'timeout', 'credentials', 'metadata')),
|
|
173 |
+ grpc.ClientCallDetails):
|
|
174 |
+ pass
|
|
175 |
+ |
|
176 |
+ return _ClientCallDetails(client_call_details.method,
|
|
177 |
+ client_call_details.timeout,
|
|
178 |
+ client_call_details.credentials,
|
|
179 |
+ new_metadata)
|
1 |
+# Copyright (C) 2018 Bloomberg LP
|
|
2 |
+#
|
|
3 |
+# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4 |
+# you may not use this file except in compliance with the License.
|
|
5 |
+# You may obtain a copy of the License at
|
|
6 |
+#
|
|
7 |
+# <http://www.apache.org/licenses/LICENSE-2.0>
|
|
8 |
+#
|
|
9 |
+# Unless required by applicable law or agreed to in writing, software
|
|
10 |
+# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11 |
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12 |
+# See the License for the specific language governing permissions and
|
|
13 |
+# limitations under the License.
|
|
14 |
+ |
|
15 |
+ |
|
16 |
+from datetime import datetime
|
|
17 |
+from enum import Enum
|
|
18 |
+import logging
|
|
19 |
+ |
|
20 |
+import grpc
|
|
21 |
+import jwt
|
|
22 |
+ |
|
23 |
+from buildgrid._exceptions import InvalidArgumentError
|
|
24 |
+ |
|
25 |
+ |
|
26 |
+class JwtAlgorithm(Enum):
|
|
27 |
+ # HMAC algorithms:
|
|
28 |
+ HS256 = 'HS256'
|
|
29 |
+ HS384 = 'HS384'
|
|
30 |
+ HS512 = 'HS512'
|
|
31 |
+ |
|
32 |
+ # RSASSA-PKCS algorithms:
|
|
33 |
+ RS256 = 'RS256'
|
|
34 |
+ RS384 = 'RS384'
|
|
35 |
+ RS512 = 'RS512'
|
|
36 |
+ |
|
37 |
+ # RSASSA-PSS algorithms:
|
|
38 |
+ PS256 = 'PS256'
|
|
39 |
+ PS384 = 'PS384'
|
|
40 |
+ PS512 = 'PS512'
|
|
41 |
+ |
|
42 |
+ # ECDSA algorithms:
|
|
43 |
+ ES256 = 'ES256'
|
|
44 |
+ ES384 = 'ES384'
|
|
45 |
+ ES521 = 'ES521'
|
|
46 |
+ ES512 = 'ES512'
|
|
47 |
+ |
|
48 |
+ |
|
49 |
+class AuthMetadataServerInterceptor(grpc.ServerInterceptor):
|
|
50 |
+ |
|
51 |
+ __auth_errors = {
|
|
52 |
+ 'missing-bearer': 'Missing authentication header field',
|
|
53 |
+ 'invalid-bearer': 'Invalid authentication header field',
|
|
54 |
+ 'invalid-token': 'Invalid authentication token',
|
|
55 |
+ 'expired-token': 'Expired authentication token',
|
|
56 |
+ 'unbounded-token': 'Unbounded authentication token',
|
|
57 |
+ }
|
|
58 |
+ |
|
59 |
+ def __init__(self, secret, algorithm):
|
|
60 |
+ """Initialises a new :class:`AuthMetadataServerInterceptor`.
|
|
61 |
+ |
|
62 |
+ Args:
|
|
63 |
+ secret (str): Symmetric secret key or asymmetric public key.
|
|
64 |
+ algorithm (JwtAlgorithm): Algorithm used to encode `secret`.
|
|
65 |
+ |
|
66 |
+ Raises:
|
|
67 |
+ InvalidArgumentError: If `algorithm` is not supported.
|
|
68 |
+ """
|
|
69 |
+ self.__logger = logging.getLogger(__name__)
|
|
70 |
+ |
|
71 |
+ self.__bearer_cache = {}
|
|
72 |
+ self.__terminators = {}
|
|
73 |
+ self.__secret = secret
|
|
74 |
+ self.__algorithms = [algorithm.value]
|
|
75 |
+ |
|
76 |
+ try:
|
|
77 |
+ jwt.register_algorithm(self.__algorithms[0], None)
|
|
78 |
+ |
|
79 |
+ except TypeError:
|
|
80 |
+ raise InvalidArgumentError("Algorithm not supported for JWT decoding: [{}]"
|
|
81 |
+ .format(self.__algorithms[0]))
|
|
82 |
+ |
|
83 |
+ except ValueError:
|
|
84 |
+ pass
|
|
85 |
+ |
|
86 |
+ for code, message in self.__auth_errors.items():
|
|
87 |
+ self.__terminators[code] = _unary_unary_rpc_terminator(message)
|
|
88 |
+ |
|
89 |
+ @property
|
|
90 |
+ def algorithm(self):
|
|
91 |
+ return JwtAlgorithm(self.__algorithms[0])
|
|
92 |
+ |
|
93 |
+ def intercept_service(self, continuation, handler_call_details):
|
|
94 |
+ try:
|
|
95 |
+ # Reject requests not carrying a token:
|
|
96 |
+ bearer = dict(handler_call_details.invocation_metadata)['authorization']
|
|
97 |
+ |
|
98 |
+ except KeyError:
|
|
99 |
+ self.__logger.error("Rejecting '{}' request: {}"
|
|
100 |
+ .format(handler_call_details.method.split('/')[-1],
|
|
101 |
+ self.__auth_errors['missing-bearer']))
|
|
102 |
+ return self.__terminators['missing-bearer']
|
|
103 |
+ |
|
104 |
+ # Reject requests with malformed bearer:
|
|
105 |
+ if not bearer.startswith('Bearer '):
|
|
106 |
+ self.__logger.error("Rejecting '{}' request: {}"
|
|
107 |
+ .format(handler_call_details.method.split('/')[-1],
|
|
108 |
+ self.__auth_errors['invalid-bearer']))
|
|
109 |
+ return self.__terminators['invalid-bearer']
|
|
110 |
+ |
|
111 |
+ try:
|
|
112 |
+ # Hit the cache for already validated token:
|
|
113 |
+ expiration_time = self.__bearer_cache[bearer]
|
|
114 |
+ |
|
115 |
+ # Accept request if cached token hasn't expired yet:
|
|
116 |
+ if expiration_time > datetime.utcnow():
|
|
117 |
+ return continuation(handler_call_details) # Accepted
|
|
118 |
+ |
|
119 |
+ except KeyError:
|
|
120 |
+ pass
|
|
121 |
+ |
|
122 |
+ try:
|
|
123 |
+ # Decode and validate the new token:
|
|
124 |
+ payload = jwt.decode(bearer[7:], self.__secret, algorithms=self.__algorithms)
|
|
125 |
+ |
|
126 |
+ except jwt.exceptions.ExpiredSignatureError as e:
|
|
127 |
+ self.__logger.error("Rejecting '{}' request: {}; {}"
|
|
128 |
+ .format(handler_call_details.method.split('/')[-1],
|
|
129 |
+ self.__auth_errors['expired-token'], str(e)))
|
|
130 |
+ return self.__terminators['expired-token']
|
|
131 |
+ |
|
132 |
+ except jwt.exceptions.InvalidTokenError as e:
|
|
133 |
+ self.__logger.error("Rejecting '{}' request: {}; {}"
|
|
134 |
+ .format(handler_call_details.method.split('/')[-1],
|
|
135 |
+ self.__auth_errors['invalid-token'], str(e)))
|
|
136 |
+ return self.__terminators['invalid-token']
|
|
137 |
+ |
|
138 |
+ # Do not accept token without an expiration time:
|
|
139 |
+ if 'exp' not in payload or not isinstance(payload['exp'], int):
|
|
140 |
+ self.__logger.error("Rejecting '{}' request: {}"
|
|
141 |
+ .format(handler_call_details.method.split('/')[-1],
|
|
142 |
+ self.__auth_errors['unbounded-token']))
|
|
143 |
+ return self.__terminators['unbounded-token']
|
|
144 |
+ |
|
145 |
+ # Cache the validated token and store expiration time:
|
|
146 |
+ self.__bearer_cache[bearer] = datetime.utcfromtimestamp(payload['exp'])
|
|
147 |
+ |
|
148 |
+ return continuation(handler_call_details) # Accepted
|
|
149 |
+ |
|
150 |
+ |
|
151 |
+def _unary_unary_rpc_terminator(details):
|
|
152 |
+ |
|
153 |
+ def terminate(ignored_request, context):
|
|
154 |
+ context.abort(grpc.StatusCode.UNAUTHENTICATED, details)
|
|
155 |
+ |
|
156 |
+ return grpc.unary_unary_rpc_method_handler(terminate)
|
... | ... | @@ -37,6 +37,10 @@ class BotsInterface: |
37 | 37 |
self._assigned_leases = {}
|
38 | 38 |
self._scheduler = scheduler
|
39 | 39 |
|
40 |
+ @property
|
|
41 |
+ def scheduler(self):
|
|
42 |
+ return self._scheduler
|
|
43 |
+ |
|
40 | 44 |
def register_instance_with_server(self, instance_name, server):
|
41 | 45 |
server.add_bots_interface(self, instance_name)
|
42 | 46 |
|
... | ... | @@ -23,8 +23,9 @@ import logging |
23 | 23 |
|
24 | 24 |
import grpc
|
25 | 25 |
|
26 |
-from google.protobuf.empty_pb2 import Empty
|
|
26 |
+from google.protobuf import empty_pb2, timestamp_pb2
|
|
27 | 27 |
|
28 |
+from buildgrid._enums import BotStatus
|
|
28 | 29 |
from buildgrid._exceptions import InvalidArgumentError, OutOfSyncError
|
29 | 30 |
from buildgrid._protos.google.devtools.remoteworkers.v1test2 import bots_pb2
|
30 | 31 |
from buildgrid._protos.google.devtools.remoteworkers.v1test2 import bots_pb2_grpc
|
... | ... | @@ -32,24 +33,86 @@ from buildgrid._protos.google.devtools.remoteworkers.v1test2 import bots_pb2_grp |
32 | 33 |
|
33 | 34 |
class BotsService(bots_pb2_grpc.BotsServicer):
|
34 | 35 |
|
35 |
- def __init__(self, server):
|
|
36 |
+ def __init__(self, server, monitor=False):
|
|
36 | 37 |
self.__logger = logging.getLogger(__name__)
|
37 | 38 |
|
39 |
+ self.__bots_by_status = None
|
|
40 |
+ self.__bots_by_instance = None
|
|
41 |
+ self.__bots = None
|
|
42 |
+ |
|
38 | 43 |
self._instances = {}
|
39 | 44 |
|
40 | 45 |
bots_pb2_grpc.add_BotsServicer_to_server(self, server)
|
41 | 46 |
|
42 |
- def add_instance(self, name, instance):
|
|
43 |
- self._instances[name] = instance
|
|
47 |
+ self._is_instrumented = monitor
|
|
48 |
+ |
|
49 |
+ if self._is_instrumented:
|
|
50 |
+ self.__bots_by_status = {}
|
|
51 |
+ self.__bots_by_instance = {}
|
|
52 |
+ self.__bots = {}
|
|
53 |
+ |
|
54 |
+ self.__bots_by_status[BotStatus.OK] = set()
|
|
55 |
+ self.__bots_by_status[BotStatus.UNHEALTHY] = set()
|
|
56 |
+ |
|
57 |
+ # --- Public API ---
|
|
58 |
+ |
|
59 |
+ def add_instance(self, instance_name, instance):
|
|
60 |
+ """Registers a new servicer instance.
|
|
61 |
+ |
|
62 |
+ Args:
|
|
63 |
+ instance_name (str): The new instance's name.
|
|
64 |
+ instance (BotsInterface): The new instance itself.
|
|
65 |
+ """
|
|
66 |
+ self._instances[instance_name] = instance
|
|
67 |
+ |
|
68 |
+ if self._is_instrumented:
|
|
69 |
+ self.__bots_by_instance[instance_name] = set()
|
|
70 |
+ |
|
71 |
+ def get_scheduler(self, instance_name):
|
|
72 |
+ """Retrieves a reference to the scheduler for an instance.
|
|
73 |
+ |
|
74 |
+ Args:
|
|
75 |
+ instance_name (str): The name of the instance to query.
|
|
76 |
+ |
|
77 |
+ Returns:
|
|
78 |
+ Scheduler: A reference to the scheduler for `instance_name`.
|
|
79 |
+ |
|
80 |
+ Raises:
|
|
81 |
+ InvalidArgumentError: If no instance named `instance_name` exists.
|
|
82 |
+ """
|
|
83 |
+ instance = self._get_instance(instance_name)
|
|
84 |
+ |
|
85 |
+ return instance.scheduler
|
|
86 |
+ |
|
87 |
+ # --- Public API: Servicer ---
|
|
44 | 88 |
|
45 | 89 |
def CreateBotSession(self, request, context):
|
90 |
+ """Handles CreateBotSessionRequest messages.
|
|
91 |
+ |
|
92 |
+ Args:
|
|
93 |
+ request (CreateBotSessionRequest): The incoming RPC request.
|
|
94 |
+ context (grpc.ServicerContext): Context for the RPC call.
|
|
95 |
+ """
|
|
46 | 96 |
self.__logger.debug("CreateBotSession request from [%s]", context.peer())
|
47 | 97 |
|
98 |
+ instance_name = request.parent
|
|
99 |
+ bot_status = BotStatus(request.bot_session.status)
|
|
100 |
+ bot_id = request.bot_session.bot_id
|
|
101 |
+ |
|
48 | 102 |
try:
|
49 |
- parent = request.parent
|
|
50 |
- instance = self._get_instance(request.parent)
|
|
51 |
- return instance.create_bot_session(parent,
|
|
52 |
- request.bot_session)
|
|
103 |
+ instance = self._get_instance(instance_name)
|
|
104 |
+ bot_session = instance.create_bot_session(instance_name,
|
|
105 |
+ request.bot_session)
|
|
106 |
+ now = timestamp_pb2.Timestamp()
|
|
107 |
+ now.GetCurrentTime()
|
|
108 |
+ |
|
109 |
+ if self._is_instrumented:
|
|
110 |
+ self.__bots[bot_id] = now
|
|
111 |
+ self.__bots_by_instance[instance_name].add(bot_id)
|
|
112 |
+ if bot_status in self.__bots_by_status:
|
|
113 |
+ self.__bots_by_status[bot_status].add(bot_id)
|
|
114 |
+ |
|
115 |
+ return bot_session
|
|
53 | 116 |
|
54 | 117 |
except InvalidArgumentError as e:
|
55 | 118 |
self.__logger.error(e)
|
... | ... | @@ -59,17 +122,41 @@ class BotsService(bots_pb2_grpc.BotsServicer): |
59 | 122 |
return bots_pb2.BotSession()
|
60 | 123 |
|
61 | 124 |
def UpdateBotSession(self, request, context):
|
125 |
+ """Handles UpdateBotSessionRequest messages.
|
|
126 |
+ |
|
127 |
+ Args:
|
|
128 |
+ request (UpdateBotSessionRequest): The incoming RPC request.
|
|
129 |
+ context (grpc.ServicerContext): Context for the RPC call.
|
|
130 |
+ """
|
|
62 | 131 |
self.__logger.debug("UpdateBotSession request from [%s]", context.peer())
|
63 | 132 |
|
133 |
+ names = request.name.split("/")
|
|
134 |
+ bot_status = BotStatus(request.bot_session.status)
|
|
135 |
+ bot_id = request.bot_session.bot_id
|
|
136 |
+ |
|
64 | 137 |
try:
|
65 |
- names = request.name.split("/")
|
|
66 |
- # Operation name should be in format:
|
|
67 |
- # {instance/name}/{uuid}
|
|
68 |
- instance_name = ''.join(names[0:-1])
|
|
138 |
+ instance_name = '/'.join(names[:-1])
|
|
69 | 139 |
|
70 | 140 |
instance = self._get_instance(instance_name)
|
71 |
- return instance.update_bot_session(request.name,
|
|
72 |
- request.bot_session)
|
|
141 |
+ bot_session = instance.update_bot_session(request.name,
|
|
142 |
+ request.bot_session)
|
|
143 |
+ |
|
144 |
+ if self._is_instrumented:
|
|
145 |
+ self.__bots[bot_id].GetCurrentTime()
|
|
146 |
+ if bot_id not in self.__bots_by_status[bot_status]:
|
|
147 |
+ if bot_status == BotStatus.OK:
|
|
148 |
+ self.__bots_by_status[BotStatus.OK].add(bot_id)
|
|
149 |
+ self.__bots_by_status[BotStatus.UNHEALTHY].discard(bot_id)
|
|
150 |
+ |
|
151 |
+ elif bot_status == BotStatus.UNHEALTHY:
|
|
152 |
+ self.__bots_by_status[BotStatus.OK].discard(bot_id)
|
|
153 |
+ self.__bots_by_status[BotStatus.UNHEALTHY].add(bot_id)
|
|
154 |
+ |
|
155 |
+ else:
|
|
156 |
+ self.__bots_by_instance[instance_name].remove(bot_id)
|
|
157 |
+ del self.__bots[bot_id]
|
|
158 |
+ |
|
159 |
+ return bot_session
|
|
73 | 160 |
|
74 | 161 |
except InvalidArgumentError as e:
|
75 | 162 |
self.__logger.error(e)
|
... | ... | @@ -89,10 +176,47 @@ class BotsService(bots_pb2_grpc.BotsServicer): |
89 | 176 |
return bots_pb2.BotSession()
|
90 | 177 |
|
91 | 178 |
def PostBotEventTemp(self, request, context):
|
179 |
+ """Handles PostBotEventTempRequest messages.
|
|
180 |
+ |
|
181 |
+ Args:
|
|
182 |
+ request (PostBotEventTempRequest): The incoming RPC request.
|
|
183 |
+ context (grpc.ServicerContext): Context for the RPC call.
|
|
184 |
+ """
|
|
92 | 185 |
self.__logger.debug("PostBotEventTemp request from [%s]", context.peer())
|
93 | 186 |
|
94 | 187 |
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
95 |
- return Empty()
|
|
188 |
+ |
|
189 |
+ return empty_pb2.Empty()
|
|
190 |
+ |
|
191 |
+ # --- Public API: Monitoring ---
|
|
192 |
+ |
|
193 |
+ @property
|
|
194 |
+ def is_instrumented(self):
|
|
195 |
+ return self._is_instrumented
|
|
196 |
+ |
|
197 |
+ def query_n_bots(self):
|
|
198 |
+ if self.__bots is not None:
|
|
199 |
+ return len(self.__bots)
|
|
200 |
+ |
|
201 |
+ return 0
|
|
202 |
+ |
|
203 |
+ def query_n_bots_for_instance(self, instance_name):
|
|
204 |
+ try:
|
|
205 |
+ if self.__bots_by_instance is not None:
|
|
206 |
+ return len(self.__bots_by_instance[instance_name])
|
|
207 |
+ except KeyError:
|
|
208 |
+ pass
|
|
209 |
+ return 0
|
|
210 |
+ |
|
211 |
+ def query_n_bots_for_status(self, bot_status):
|
|
212 |
+ try:
|
|
213 |
+ if self.__bots_by_status is not None:
|
|
214 |
+ return len(self.__bots_by_status[bot_status])
|
|
215 |
+ except KeyError:
|
|
216 |
+ pass
|
|
217 |
+ return 0
|
|
218 |
+ |
|
219 |
+ # --- Private API ---
|
|
96 | 220 |
|
97 | 221 |
def _get_instance(self, name):
|
98 | 222 |
try:
|
... | ... | @@ -36,6 +36,10 @@ class ExecutionInstance: |
36 | 36 |
self._storage = storage
|
37 | 37 |
self._scheduler = scheduler
|
38 | 38 |
|
39 |
+ @property
|
|
40 |
+ def scheduler(self):
|
|
41 |
+ return self._scheduler
|
|
42 |
+ |
|
39 | 43 |
def register_instance_with_server(self, instance_name, server):
|
40 | 44 |
server.add_execution_instance(self, instance_name)
|
41 | 45 |
|
... | ... | @@ -33,30 +33,84 @@ from buildgrid._protos.google.longrunning import operations_pb2 |
33 | 33 |
|
34 | 34 |
class ExecutionService(remote_execution_pb2_grpc.ExecutionServicer):
|
35 | 35 |
|
36 |
- def __init__(self, server):
|
|
36 |
+ def __init__(self, server, monitor=False):
|
|
37 | 37 |
self.__logger = logging.getLogger(__name__)
|
38 | 38 |
|
39 |
+ self.__peers_by_instance = None
|
|
40 |
+ self.__peers = None
|
|
41 |
+ |
|
39 | 42 |
self._instances = {}
|
43 |
+ |
|
40 | 44 |
remote_execution_pb2_grpc.add_ExecutionServicer_to_server(self, server)
|
41 | 45 |
|
42 |
- def add_instance(self, name, instance):
|
|
43 |
- self._instances[name] = instance
|
|
46 |
+ self._is_instrumented = monitor
|
|
47 |
+ |
|
48 |
+ if self._is_instrumented:
|
|
49 |
+ self.__peers_by_instance = {}
|
|
50 |
+ self.__peers = {}
|
|
51 |
+ |
|
52 |
+ # --- Public API ---
|
|
53 |
+ |
|
54 |
+ def add_instance(self, instance_name, instance):
|
|
55 |
+ """Registers a new servicer instance.
|
|
56 |
+ |
|
57 |
+ Args:
|
|
58 |
+ instance_name (str): The new instance's name.
|
|
59 |
+ instance (ExecutionInstance): The new instance itself.
|
|
60 |
+ """
|
|
61 |
+ self._instances[instance_name] = instance
|
|
62 |
+ |
|
63 |
+ if self._is_instrumented:
|
|
64 |
+ self.__peers_by_instance[instance_name] = set()
|
|
65 |
+ |
|
66 |
+ def get_scheduler(self, instance_name):
|
|
67 |
+ """Retrieves a reference to the scheduler for an instance.
|
|
68 |
+ |
|
69 |
+ Args:
|
|
70 |
+ instance_name (str): The name of the instance to query.
|
|
71 |
+ |
|
72 |
+ Returns:
|
|
73 |
+ Scheduler: A reference to the scheduler for `instance_name`.
|
|
74 |
+ |
|
75 |
+ Raises:
|
|
76 |
+ InvalidArgumentError: If no instance named `instance_name` exists.
|
|
77 |
+ """
|
|
78 |
+ instance = self._get_instance(instance_name)
|
|
79 |
+ |
|
80 |
+ return instance.scheduler
|
|
81 |
+ |
|
82 |
+ # --- Public API: Servicer ---
|
|
44 | 83 |
|
45 | 84 |
def Execute(self, request, context):
|
85 |
+ """Handles ExecuteRequest messages.
|
|
86 |
+ |
|
87 |
+ Args:
|
|
88 |
+ request (ExecuteRequest): The incoming RPC request.
|
|
89 |
+ context (grpc.ServicerContext): Context for the RPC call.
|
|
90 |
+ """
|
|
46 | 91 |
self.__logger.debug("Execute request from [%s]", context.peer())
|
47 | 92 |
|
93 |
+ instance_name = request.instance_name
|
|
94 |
+ message_queue = queue.Queue()
|
|
95 |
+ peer = context.peer()
|
|
96 |
+ |
|
48 | 97 |
try:
|
49 |
- message_queue = queue.Queue()
|
|
50 |
- instance = self._get_instance(request.instance_name)
|
|
98 |
+ instance = self._get_instance(instance_name)
|
|
51 | 99 |
operation = instance.execute(request.action_digest,
|
52 | 100 |
request.skip_cache_lookup,
|
53 | 101 |
message_queue)
|
54 | 102 |
|
55 |
- context.add_callback(partial(instance.unregister_message_client,
|
|
56 |
- operation.name, message_queue))
|
|
103 |
+ context.add_callback(partial(self._rpc_termination_callback,
|
|
104 |
+ peer, instance_name, operation.name, message_queue))
|
|
57 | 105 |
|
58 |
- instanced_op_name = "{}/{}".format(request.instance_name,
|
|
59 |
- operation.name)
|
|
106 |
+ if self._is_instrumented:
|
|
107 |
+ if peer not in self.__peers:
|
|
108 |
+ self.__peers_by_instance[instance_name].add(peer)
|
|
109 |
+ self.__peers[peer] = 1
|
|
110 |
+ else:
|
|
111 |
+ self.__peers[peer] += 1
|
|
112 |
+ |
|
113 |
+ instanced_op_name = "{}/{}".format(instance_name, operation.name)
|
|
60 | 114 |
|
61 | 115 |
self.__logger.info("Operation name: [%s]", instanced_op_name)
|
62 | 116 |
|
... | ... | @@ -86,23 +140,33 @@ class ExecutionService(remote_execution_pb2_grpc.ExecutionServicer): |
86 | 140 |
yield operations_pb2.Operation()
|
87 | 141 |
|
88 | 142 |
def WaitExecution(self, request, context):
|
89 |
- self.__logger.debug("WaitExecution request from [%s]", context.peer())
|
|
143 |
+ """Handles WaitExecutionRequest messages.
|
|
90 | 144 |
|
91 |
- try:
|
|
92 |
- names = request.name.split("/")
|
|
145 |
+ Args:
|
|
146 |
+ request (WaitExecutionRequest): The incoming RPC request.
|
|
147 |
+ context (grpc.ServicerContext): Context for the RPC call.
|
|
148 |
+ """
|
|
149 |
+ self.__logger.debug("WaitExecution request from [%s]", context.peer())
|
|
93 | 150 |
|
94 |
- # Operation name should be in format:
|
|
95 |
- # {instance/name}/{operation_id}
|
|
96 |
- instance_name = ''.join(names[0:-1])
|
|
151 |
+ names = request.name.split('/')
|
|
152 |
+ instance_name = '/'.join(names[:-1])
|
|
153 |
+ operation_name = names[-1]
|
|
154 |
+ message_queue = queue.Queue()
|
|
155 |
+ peer = context.peer()
|
|
97 | 156 |
|
98 |
- message_queue = queue.Queue()
|
|
99 |
- operation_name = names[-1]
|
|
157 |
+ try:
|
|
100 | 158 |
instance = self._get_instance(instance_name)
|
101 | 159 |
|
102 | 160 |
instance.register_message_client(operation_name, message_queue)
|
161 |
+ context.add_callback(partial(self._rpc_termination_callback,
|
|
162 |
+ peer, instance_name, operation_name, message_queue))
|
|
103 | 163 |
|
104 |
- context.add_callback(partial(instance.unregister_message_client,
|
|
105 |
- operation_name, message_queue))
|
|
164 |
+ if self._is_instrumented:
|
|
165 |
+ if peer not in self.__peers:
|
|
166 |
+ self.__peers_by_instance[instance_name].add(peer)
|
|
167 |
+ self.__peers[peer] = 1
|
|
168 |
+ else:
|
|
169 |
+ self.__peers[peer] += 1
|
|
106 | 170 |
|
107 | 171 |
for operation in instance.stream_operation_updates(message_queue,
|
108 | 172 |
operation_name):
|
... | ... | @@ -123,6 +187,39 @@ class ExecutionService(remote_execution_pb2_grpc.ExecutionServicer): |
123 | 187 |
context.set_code(grpc.StatusCode.CANCELLED)
|
124 | 188 |
yield operations_pb2.Operation()
|
125 | 189 |
|
190 |
+ # --- Public API: Monitoring ---
|
|
191 |
+ |
|
192 |
+ @property
|
|
193 |
+ def is_instrumented(self):
|
|
194 |
+ return self._is_instrumented
|
|
195 |
+ |
|
196 |
+ def query_n_clients(self):
|
|
197 |
+ if self.__peers is not None:
|
|
198 |
+ return len(self.__peers)
|
|
199 |
+ return 0
|
|
200 |
+ |
|
201 |
+ def query_n_clients_for_instance(self, instance_name):
|
|
202 |
+ try:
|
|
203 |
+ if self.__peers_by_instance is not None:
|
|
204 |
+ return len(self.__peers_by_instance[instance_name])
|
|
205 |
+ except KeyError:
|
|
206 |
+ pass
|
|
207 |
+ return 0
|
|
208 |
+ |
|
209 |
+ # --- Private API ---
|
|
210 |
+ |
|
211 |
+ def _rpc_termination_callback(self, peer, instance_name, job_name, message_queue):
|
|
212 |
+ instance = self._get_instance(instance_name)
|
|
213 |
+ |
|
214 |
+ instance.unregister_message_client(job_name, message_queue)
|
|
215 |
+ |
|
216 |
+ if self._is_instrumented:
|
|
217 |
+ if self.__peers[peer] > 1:
|
|
218 |
+ self.__peers[peer] -= 1
|
|
219 |
+ else:
|
|
220 |
+ self.__peers_by_instance[instance_name].remove(peer)
|
|
221 |
+ del self.__peers[peer]
|
|
222 |
+ |
|
126 | 223 |
def _get_instance(self, name):
|
127 | 224 |
try:
|
128 | 225 |
return self._instances[name]
|
... | ... | @@ -15,13 +15,18 @@ |
15 | 15 |
|
16 | 16 |
import asyncio
|
17 | 17 |
from concurrent import futures
|
18 |
+from datetime import timedelta
|
|
18 | 19 |
import logging
|
19 | 20 |
import os
|
20 | 21 |
import signal
|
22 |
+import time
|
|
21 | 23 |
|
22 | 24 |
import grpc
|
23 | 25 |
|
26 |
+from buildgrid._enums import BotStatus, MetricRecordDomain, MetricRecordType
|
|
27 |
+from buildgrid._protos.buildgrid.v2 import monitoring_pb2
|
|
24 | 28 |
from buildgrid.server.actioncache.service import ActionCacheService
|
29 |
+from buildgrid.server._authentication import JwtAlgorithm, AuthMetadataServerInterceptor
|
|
25 | 30 |
from buildgrid.server.bots.service import BotsService
|
26 | 31 |
from buildgrid.server.cas.service import ByteStreamService, ContentAddressableStorageService
|
27 | 32 |
from buildgrid.server.execution.service import ExecutionService
|
... | ... | @@ -30,6 +35,7 @@ from buildgrid.server.operations.service import OperationsService |
30 | 35 |
from buildgrid.server.referencestorage.service import ReferenceStorageService
|
31 | 36 |
from buildgrid.server.capabilities.instance import CapabilitiesInstance
|
32 | 37 |
from buildgrid.server.capabilities.service import CapabilitiesService
|
38 |
+from buildgrid.settings import MONITORING_PERIOD
|
|
33 | 39 |
|
34 | 40 |
|
35 | 41 |
class BuildGridServer:
|
... | ... | @@ -52,11 +58,15 @@ class BuildGridServer: |
52 | 58 |
max_workers = (os.cpu_count() or 1) * 5
|
53 | 59 |
|
54 | 60 |
self.__grpc_executor = futures.ThreadPoolExecutor(max_workers)
|
55 |
- self.__grpc_server = grpc.server(self.__grpc_executor)
|
|
61 |
+ self.__grpc_auth_interceptor = AuthMetadataServerInterceptor('your-256-bit-secret', JwtAlgorithm.HS256)
|
|
62 |
+ self.__grpc_server = grpc.server(
|
|
63 |
+ self.__grpc_executor, interceptors=(self.__grpc_auth_interceptor,))
|
|
56 | 64 |
|
57 | 65 |
self.__main_loop = asyncio.get_event_loop()
|
58 | 66 |
self.__monitoring_bus = None
|
59 | 67 |
|
68 |
+ self.__state_monitoring_task = None
|
|
69 |
+ |
|
60 | 70 |
# We always want a capabilities service
|
61 | 71 |
self._capabilities_service = CapabilitiesService(self.__grpc_server)
|
62 | 72 |
|
... | ... | @@ -68,6 +78,9 @@ class BuildGridServer: |
68 | 78 |
self._cas_service = None
|
69 | 79 |
self._bytestream_service = None
|
70 | 80 |
|
81 |
+ self._schedulers = {}
|
|
82 |
+ self._instances = set()
|
|
83 |
+ |
|
71 | 84 |
self._is_instrumented = monitor
|
72 | 85 |
|
73 | 86 |
if self._is_instrumented:
|
... | ... | @@ -84,6 +97,10 @@ class BuildGridServer: |
84 | 97 |
if self._is_instrumented:
|
85 | 98 |
self.__monitoring_bus.start()
|
86 | 99 |
|
100 |
+ self.__state_monitoring_task = asyncio.ensure_future(
|
|
101 |
+ self._state_monitoring_worker(period=MONITORING_PERIOD),
|
|
102 |
+ loop=self.__main_loop)
|
|
103 |
+ |
|
87 | 104 |
self.__main_loop.add_signal_handler(signal.SIGTERM, self.stop)
|
88 | 105 |
|
89 | 106 |
self.__main_loop.run_forever()
|
... | ... | @@ -91,6 +108,9 @@ class BuildGridServer: |
91 | 108 |
def stop(self):
|
92 | 109 |
"""Stops the BuildGrid server."""
|
93 | 110 |
if self._is_instrumented:
|
111 |
+ if self.__state_monitoring_task is not None:
|
|
112 |
+ self.__state_monitoring_task.cancel()
|
|
113 |
+ |
|
94 | 114 |
self.__monitoring_bus.stop()
|
95 | 115 |
|
96 | 116 |
self.__main_loop.stop()
|
... | ... | @@ -130,11 +150,15 @@ class BuildGridServer: |
130 | 150 |
instance_name (str): Instance name.
|
131 | 151 |
"""
|
132 | 152 |
if self._execution_service is None:
|
133 |
- self._execution_service = ExecutionService(self.__grpc_server)
|
|
153 |
+ self._execution_service = ExecutionService(
|
|
154 |
+ self.__grpc_server, monitor=self._is_instrumented)
|
|
134 | 155 |
|
135 | 156 |
self._execution_service.add_instance(instance_name, instance)
|
136 | 157 |
self._add_capabilities_instance(instance_name, execution_instance=instance)
|
137 | 158 |
|
159 |
+ self._schedulers[instance_name] = instance.scheduler
|
|
160 |
+ self._instances.add(instance_name)
|
|
161 |
+ |
|
138 | 162 |
def add_bots_interface(self, instance, instance_name):
|
139 | 163 |
"""Adds a :obj:`BotsInterface` to the service.
|
140 | 164 |
|
... | ... | @@ -145,10 +169,13 @@ class BuildGridServer: |
145 | 169 |
instance_name (str): Instance name.
|
146 | 170 |
"""
|
147 | 171 |
if self._bots_service is None:
|
148 |
- self._bots_service = BotsService(self.__grpc_server)
|
|
172 |
+ self._bots_service = BotsService(
|
|
173 |
+ self.__grpc_server, monitor=self._is_instrumented)
|
|
149 | 174 |
|
150 | 175 |
self._bots_service.add_instance(instance_name, instance)
|
151 | 176 |
|
177 |
+ self._instances.add(instance_name)
|
|
178 |
+ |
|
152 | 179 |
def add_operations_instance(self, instance, instance_name):
|
153 | 180 |
"""Adds an :obj:`OperationsInstance` to the service.
|
154 | 181 |
|
... | ... | @@ -221,6 +248,14 @@ class BuildGridServer: |
221 | 248 |
|
222 | 249 |
self._bytestream_service.add_instance(instance_name, instance)
|
223 | 250 |
|
251 |
+ # --- Public API: Monitoring ---
|
|
252 |
+ |
|
253 |
+ @property
|
|
254 |
+ def is_instrumented(self):
|
|
255 |
+ return self._is_instrumented
|
|
256 |
+ |
|
257 |
+ # --- Private API ---
|
|
258 |
+ |
|
224 | 259 |
def _add_capabilities_instance(self, instance_name,
|
225 | 260 |
cas_instance=None,
|
226 | 261 |
action_cache_instance=None,
|
... | ... | @@ -246,8 +281,152 @@ class BuildGridServer: |
246 | 281 |
execution_instance)
|
247 | 282 |
self._capabilities_service.add_instance(instance_name, capabilities_instance)
|
248 | 283 |
|
249 |
- # --- Public API: Monitoring ---
|
|
284 |
+ async def _state_monitoring_worker(self, period=1.0):
|
|
285 |
+ """Periodically publishes state metrics to the monitoring bus."""
|
|
286 |
+ async def __state_monitoring_worker():
|
|
287 |
+ # Emit total clients count record:
|
|
288 |
+ _, record = self._query_n_clients()
|
|
289 |
+ await self.__monitoring_bus.send_record(record)
|
|
290 |
+ |
|
291 |
+ # Emit total bots count record:
|
|
292 |
+ _, record = self._query_n_bots()
|
|
293 |
+ await self.__monitoring_bus.send_record(record)
|
|
294 |
+ |
|
295 |
+ queue_times = []
|
|
296 |
+ # Emits records by instance:
|
|
297 |
+ for instance_name in self._instances:
|
|
298 |
+ # Emit instance clients count record:
|
|
299 |
+ _, record = self._query_n_clients_for_instance(instance_name)
|
|
300 |
+ await self.__monitoring_bus.send_record(record)
|
|
301 |
+ |
|
302 |
+ # Emit instance bots count record:
|
|
303 |
+ _, record = self._query_n_bots_for_instance(instance_name)
|
|
304 |
+ await self.__monitoring_bus.send_record(record)
|
|
305 |
+ |
|
306 |
+ # Emit instance average queue time record:
|
|
307 |
+ queue_time, record = self._query_am_queue_time_for_instance(instance_name)
|
|
308 |
+ await self.__monitoring_bus.send_record(record)
|
|
309 |
+ if queue_time:
|
|
310 |
+ queue_times.append(queue_time)
|
|
311 |
+ |
|
312 |
+ # Emits records by bot status:
|
|
313 |
+ for bot_status in [BotStatus.OK, BotStatus.UNHEALTHY]:
|
|
314 |
+ # Emit status bots count record:
|
|
315 |
+ _, record = self._query_n_bots_for_status(bot_status)
|
|
316 |
+ await self.__monitoring_bus.send_record(record)
|
|
317 |
+ |
|
318 |
+ # Emit overall average queue time record:
|
|
319 |
+ if queue_times:
|
|
320 |
+ am_queue_time = sum(queue_times, timedelta()) / len(queue_times)
|
|
321 |
+ else:
|
|
322 |
+ am_queue_time = timedelta()
|
|
323 |
+ record = self._forge_timer_metric_record(
|
|
324 |
+ MetricRecordDomain.STATE,
|
|
325 |
+ 'average-queue-time',
|
|
326 |
+ am_queue_time)
|
|
327 |
+ |
|
328 |
+ await self.__monitoring_bus.send_record(record)
|
|
250 | 329 |
|
251 |
- @property
|
|
252 |
- def is_instrumented(self):
|
|
253 |
- return self._is_instrumented
|
|
330 |
+ try:
|
|
331 |
+ while True:
|
|
332 |
+ start = time.time()
|
|
333 |
+ await __state_monitoring_worker()
|
|
334 |
+ |
|
335 |
+ end = time.time()
|
|
336 |
+ await asyncio.sleep(period - (end - start))
|
|
337 |
+ |
|
338 |
+ except asyncio.CancelledError:
|
|
339 |
+ pass
|
|
340 |
+ |
|
341 |
+ def _forge_counter_metric_record(self, domain, name, count, metadata=None):
|
|
342 |
+ counter_record = monitoring_pb2.MetricRecord()
|
|
343 |
+ |
|
344 |
+ counter_record.creation_timestamp.GetCurrentTime()
|
|
345 |
+ counter_record.domain = domain.value
|
|
346 |
+ counter_record.type = MetricRecordType.COUNTER.value
|
|
347 |
+ counter_record.name = name
|
|
348 |
+ counter_record.count = count
|
|
349 |
+ if metadata is not None:
|
|
350 |
+ counter_record.metadata.update(metadata)
|
|
351 |
+ |
|
352 |
+ return counter_record
|
|
353 |
+ |
|
354 |
+ def _forge_timer_metric_record(self, domain, name, duration, metadata=None):
|
|
355 |
+ timer_record = monitoring_pb2.MetricRecord()
|
|
356 |
+ |
|
357 |
+ timer_record.creation_timestamp.GetCurrentTime()
|
|
358 |
+ timer_record.domain = domain.value
|
|
359 |
+ timer_record.type = MetricRecordType.TIMER.value
|
|
360 |
+ timer_record.name = name
|
|
361 |
+ timer_record.duration.FromTimedelta(duration)
|
|
362 |
+ if metadata is not None:
|
|
363 |
+ timer_record.metadata.update(metadata)
|
|
364 |
+ |
|
365 |
+ return timer_record
|
|
366 |
+ |
|
367 |
+ def _forge_gauge_metric_record(self, domain, name, value, metadata=None):
|
|
368 |
+ gauge_record = monitoring_pb2.MetricRecord()
|
|
369 |
+ |
|
370 |
+ gauge_record.creation_timestamp.GetCurrentTime()
|
|
371 |
+ gauge_record.domain = domain.value
|
|
372 |
+ gauge_record.type = MetricRecordType.GAUGE.value
|
|
373 |
+ gauge_record.name = name
|
|
374 |
+ gauge_record.value = value
|
|
375 |
+ if metadata is not None:
|
|
376 |
+ gauge_record.metadata.update(metadata)
|
|
377 |
+ |
|
378 |
+ return gauge_record
|
|
379 |
+ |
|
380 |
+ # --- Private API: Monitoring ---
|
|
381 |
+ |
|
382 |
+ def _query_n_clients(self):
|
|
383 |
+ """Queries the number of clients connected."""
|
|
384 |
+ n_clients = self._execution_service.query_n_clients()
|
|
385 |
+ gauge_record = self._forge_gauge_metric_record(
|
|
386 |
+ MetricRecordDomain.STATE, 'clients-count', n_clients)
|
|
387 |
+ |
|
388 |
+ return n_clients, gauge_record
|
|
389 |
+ |
|
390 |
+ def _query_n_clients_for_instance(self, instance_name):
|
|
391 |
+ """Queries the number of clients connected for a given instance"""
|
|
392 |
+ n_clients = self._execution_service.query_n_clients_for_instance(instance_name)
|
|
393 |
+ gauge_record = self._forge_gauge_metric_record(
|
|
394 |
+ MetricRecordDomain.STATE, 'clients-count', n_clients,
|
|
395 |
+ metadata={'instance-name': instance_name or 'void'})
|
|
396 |
+ |
|
397 |
+ return n_clients, gauge_record
|
|
398 |
+ |
|
399 |
+ def _query_n_bots(self):
|
|
400 |
+ """Queries the number of bots connected."""
|
|
401 |
+ n_bots = self._bots_service.query_n_bots()
|
|
402 |
+ gauge_record = self._forge_gauge_metric_record(
|
|
403 |
+ MetricRecordDomain.STATE, 'bots-count', n_bots)
|
|
404 |
+ |
|
405 |
+ return n_bots, gauge_record
|
|
406 |
+ |
|
407 |
+ def _query_n_bots_for_instance(self, instance_name):
|
|
408 |
+ """Queries the number of bots connected for a given instance."""
|
|
409 |
+ n_bots = self._bots_service.query_n_bots_for_instance(instance_name)
|
|
410 |
+ gauge_record = self._forge_gauge_metric_record(
|
|
411 |
+ MetricRecordDomain.STATE, 'bots-count', n_bots,
|
|
412 |
+ metadata={'instance-name': instance_name or 'void'})
|
|
413 |
+ |
|
414 |
+ return n_bots, gauge_record
|
|
415 |
+ |
|
416 |
+ def _query_n_bots_for_status(self, bot_status):
|
|
417 |
+ """Queries the number of bots connected for a given health status."""
|
|
418 |
+ n_bots = self._bots_service.query_n_bots_for_status(bot_status)
|
|
419 |
+ gauge_record = self._forge_gauge_metric_record(
|
|
420 |
+ MetricRecordDomain.STATE, 'bots-count', n_bots,
|
|
421 |
+ metadata={'bot-status': bot_status.name})
|
|
422 |
+ |
|
423 |
+ return n_bots, gauge_record
|
|
424 |
+ |
|
425 |
+ def _query_am_queue_time_for_instance(self, instance_name):
|
|
426 |
+ """Queries the average job's queue time for a given instance."""
|
|
427 |
+ am_queue_time = self._schedulers[instance_name].query_am_queue_time()
|
|
428 |
+ timer_record = self._forge_timer_metric_record(
|
|
429 |
+ MetricRecordDomain.STATE, 'average-queue-time', am_queue_time,
|
|
430 |
+ metadata={'instance-name': instance_name or 'void'})
|
|
431 |
+ |
|
432 |
+ return am_queue_time, timer_record
|
... | ... | @@ -13,10 +13,11 @@ |
13 | 13 |
# limitations under the License.
|
14 | 14 |
|
15 | 15 |
|
16 |
+from datetime import datetime
|
|
16 | 17 |
import logging
|
17 | 18 |
import uuid
|
18 | 19 |
|
19 |
-from google.protobuf import timestamp_pb2
|
|
20 |
+from google.protobuf import duration_pb2, timestamp_pb2
|
|
20 | 21 |
|
21 | 22 |
from buildgrid._enums import LeaseState, OperationStage
|
22 | 23 |
from buildgrid._exceptions import CancelledError
|
... | ... | @@ -40,6 +41,7 @@ class Job: |
40 | 41 |
self.__operation_metadata = remote_execution_pb2.ExecuteOperationMetadata()
|
41 | 42 |
|
42 | 43 |
self.__queued_timestamp = timestamp_pb2.Timestamp()
|
44 |
+ self.__queued_time_duration = duration_pb2.Duration()
|
|
43 | 45 |
self.__worker_start_timestamp = timestamp_pb2.Timestamp()
|
44 | 46 |
self.__worker_completed_timestamp = timestamp_pb2.Timestamp()
|
45 | 47 |
|
... | ... | @@ -56,6 +58,8 @@ class Job: |
56 | 58 |
self._operation.done = False
|
57 | 59 |
self._n_tries = 0
|
58 | 60 |
|
61 |
+ # --- Public API ---
|
|
62 |
+ |
|
59 | 63 |
@property
|
60 | 64 |
def name(self):
|
61 | 65 |
return self._name
|
... | ... | @@ -193,7 +197,7 @@ class Job: |
193 | 197 |
result.Unpack(action_result)
|
194 | 198 |
|
195 | 199 |
action_metadata = action_result.execution_metadata
|
196 |
- action_metadata.queued_timestamp.CopyFrom(self.__worker_start_timestamp)
|
|
200 |
+ action_metadata.queued_timestamp.CopyFrom(self.__queued_timestamp)
|
|
197 | 201 |
action_metadata.worker_start_timestamp.CopyFrom(self.__worker_start_timestamp)
|
198 | 202 |
action_metadata.worker_completed_timestamp.CopyFrom(self.__worker_completed_timestamp)
|
199 | 203 |
|
... | ... | @@ -227,6 +231,10 @@ class Job: |
227 | 231 |
self.__queued_timestamp.GetCurrentTime()
|
228 | 232 |
self._n_tries += 1
|
229 | 233 |
|
234 |
+ elif self.__operation_metadata.stage == OperationStage.EXECUTING.value:
|
|
235 |
+ queue_in, queue_out = self.__queued_timestamp.ToDatetime(), datetime.now()
|
|
236 |
+ self.__queued_time_duration.FromTimedelta(queue_out - queue_in)
|
|
237 |
+ |
|
230 | 238 |
elif self.__operation_metadata.stage == OperationStage.COMPLETED.value:
|
231 | 239 |
if self.__execute_response is not None:
|
232 | 240 |
self._operation.response.Pack(self.__execute_response)
|
... | ... | @@ -260,3 +268,11 @@ class Job: |
260 | 268 |
self.__execute_response.status.message = "Operation cancelled by client."
|
261 | 269 |
|
262 | 270 |
self.update_operation_stage(OperationStage.COMPLETED)
|
271 |
+ |
|
272 |
+ # --- Public API: Monitoring ---
|
|
273 |
+ |
|
274 |
+ def query_queue_time(self):
|
|
275 |
+ return self.__queued_time_duration.ToTimedelta()
|
|
276 |
+ |
|
277 |
+ def query_n_retries(self):
|
|
278 |
+ return self._n_tries - 1 if self._n_tries > 0 else 0
|
... | ... | @@ -32,6 +32,10 @@ class OperationsInstance: |
32 | 32 |
|
33 | 33 |
self._scheduler = scheduler
|
34 | 34 |
|
35 |
+ @property
|
|
36 |
+ def scheduler(self):
|
|
37 |
+ return self._scheduler
|
|
38 |
+ |
|
35 | 39 |
def register_instance_with_server(self, instance_name, server):
|
36 | 40 |
server.add_operations_instance(self, instance_name)
|
37 | 41 |
|
... | ... | @@ -38,8 +38,18 @@ class OperationsService(operations_pb2_grpc.OperationsServicer): |
38 | 38 |
|
39 | 39 |
operations_pb2_grpc.add_OperationsServicer_to_server(self, server)
|
40 | 40 |
|
41 |
- def add_instance(self, name, instance):
|
|
42 |
- self._instances[name] = instance
|
|
41 |
+ # --- Public API ---
|
|
42 |
+ |
|
43 |
+ def add_instance(self, instance_name, instance):
|
|
44 |
+ """Registers a new servicer instance.
|
|
45 |
+ |
|
46 |
+ Args:
|
|
47 |
+ instance_name (str): The new instance's name.
|
|
48 |
+ instance (OperationsInstance): The new instance itself.
|
|
49 |
+ """
|
|
50 |
+ self._instances[instance_name] = instance
|
|
51 |
+ |
|
52 |
+ # --- Public API: Servicer ---
|
|
43 | 53 |
|
44 | 54 |
def GetOperation(self, request, context):
|
45 | 55 |
self.__logger.debug("GetOperation request from [%s]", context.peer())
|
... | ... | @@ -127,6 +137,8 @@ class OperationsService(operations_pb2_grpc.OperationsServicer): |
127 | 137 |
|
128 | 138 |
return Empty()
|
129 | 139 |
|
140 |
+ # --- Private API ---
|
|
141 |
+ |
|
130 | 142 |
def _parse_instance_name(self, name):
|
131 | 143 |
""" If the instance name is not blank, 'name' will have the form
|
132 | 144 |
{instance_name}/{operation_uuid}. Otherwise, it will just be
|
... | ... | @@ -20,33 +20,70 @@ Schedules jobs. |
20 | 20 |
"""
|
21 | 21 |
|
22 | 22 |
from collections import deque
|
23 |
+from datetime import timedelta
|
|
23 | 24 |
import logging
|
24 | 25 |
|
26 |
+from buildgrid._enums import LeaseState, OperationStage
|
|
25 | 27 |
from buildgrid._exceptions import NotFoundError
|
26 | 28 |
|
27 |
-from .job import OperationStage, LeaseState
|
|
28 |
- |
|
29 | 29 |
|
30 | 30 |
class Scheduler:
|
31 | 31 |
|
32 | 32 |
MAX_N_TRIES = 5
|
33 | 33 |
|
34 |
- def __init__(self, action_cache=None):
|
|
34 |
+ def __init__(self, action_cache=None, monitor=False):
|
|
35 | 35 |
self.__logger = logging.getLogger(__name__)
|
36 | 36 |
|
37 |
+ self.__operations_by_stage = None
|
|
38 |
+ self.__leases_by_state = None
|
|
39 |
+ self.__queue_time_average = None
|
|
40 |
+ self.__retries_count = 0
|
|
41 |
+ |
|
37 | 42 |
self._action_cache = action_cache
|
38 | 43 |
self.jobs = {}
|
39 | 44 |
self.queue = deque()
|
40 | 45 |
|
46 |
+ self._is_instrumented = monitor
|
|
47 |
+ |
|
48 |
+ if self._is_instrumented:
|
|
49 |
+ self.__operations_by_stage = {}
|
|
50 |
+ self.__leases_by_state = {}
|
|
51 |
+ self.__queue_time_average = 0, timedelta()
|
|
52 |
+ |
|
53 |
+ self.__operations_by_stage[OperationStage.CACHE_CHECK] = set()
|
|
54 |
+ self.__operations_by_stage[OperationStage.QUEUED] = set()
|
|
55 |
+ self.__operations_by_stage[OperationStage.EXECUTING] = set()
|
|
56 |
+ self.__operations_by_stage[OperationStage.COMPLETED] = set()
|
|
57 |
+ |
|
58 |
+ self.__leases_by_state[LeaseState.PENDING] = set()
|
|
59 |
+ self.__leases_by_state[LeaseState.ACTIVE] = set()
|
|
60 |
+ self.__leases_by_state[LeaseState.COMPLETED] = set()
|
|
61 |
+ |
|
62 |
+ # --- Public API ---
|
|
63 |
+ |
|
41 | 64 |
def register_client(self, job_name, queue):
|
42 |
- self.jobs[job_name].register_client(queue)
|
|
65 |
+ job = self.jobs[job_name]
|
|
66 |
+ |
|
67 |
+ job.register_client(queue)
|
|
43 | 68 |
|
44 | 69 |
def unregister_client(self, job_name, queue):
|
45 |
- self.jobs[job_name].unregister_client(queue)
|
|
70 |
+ job = self.jobs[job_name]
|
|
46 | 71 |
|
47 |
- if not self.jobs[job_name].n_clients and self.jobs[job_name].operation.done:
|
|
72 |
+ job.unregister_client(queue)
|
|
73 |
+ |
|
74 |
+ if not job.n_clients and job.operation.done:
|
|
48 | 75 |
del self.jobs[job_name]
|
49 | 76 |
|
77 |
+ if self._is_instrumented:
|
|
78 |
+ self.__operations_by_stage[OperationStage.CACHE_CHECK].discard(job_name)
|
|
79 |
+ self.__operations_by_stage[OperationStage.QUEUED].discard(job_name)
|
|
80 |
+ self.__operations_by_stage[OperationStage.EXECUTING].discard(job_name)
|
|
81 |
+ self.__operations_by_stage[OperationStage.COMPLETED].discard(job_name)
|
|
82 |
+ |
|
83 |
+ self.__leases_by_state[LeaseState.PENDING].discard(job_name)
|
|
84 |
+ self.__leases_by_state[LeaseState.ACTIVE].discard(job_name)
|
|
85 |
+ self.__leases_by_state[LeaseState.COMPLETED].discard(job_name)
|
|
86 |
+ |
|
50 | 87 |
def queue_job(self, job, skip_cache_lookup=False):
|
51 | 88 |
self.jobs[job.name] = job
|
52 | 89 |
|
... | ... | @@ -62,23 +99,30 @@ class Scheduler: |
62 | 99 |
job.set_cached_result(action_result)
|
63 | 100 |
operation_stage = OperationStage.COMPLETED
|
64 | 101 |
|
102 |
+ if self._is_instrumented:
|
|
103 |
+ self.__retries_count += 1
|
|
104 |
+ |
|
65 | 105 |
else:
|
66 | 106 |
operation_stage = OperationStage.QUEUED
|
67 | 107 |
self.queue.append(job)
|
68 | 108 |
|
69 |
- job.update_operation_stage(operation_stage)
|
|
109 |
+ self._update_job_operation_stage(job.name, operation_stage)
|
|
70 | 110 |
|
71 | 111 |
def retry_job(self, job_name):
|
72 |
- if job_name in self.jobs:
|
|
73 |
- job = self.jobs[job_name]
|
|
74 |
- if job.n_tries >= self.MAX_N_TRIES:
|
|
75 |
- # TODO: Decide what to do with these jobs
|
|
76 |
- job.update_operation_stage(OperationStage.COMPLETED)
|
|
77 |
- # TODO: Mark these jobs as done
|
|
78 |
- else:
|
|
79 |
- job.update_operation_stage(OperationStage.QUEUED)
|
|
80 |
- job.update_lease_state(LeaseState.PENDING)
|
|
81 |
- self.queue.append(job)
|
|
112 |
+ job = self.jobs[job_name]
|
|
113 |
+ |
|
114 |
+ operation_stage = None
|
|
115 |
+ if job.n_tries >= self.MAX_N_TRIES:
|
|
116 |
+ # TODO: Decide what to do with these jobs
|
|
117 |
+ operation_stage = OperationStage.COMPLETED
|
|
118 |
+ # TODO: Mark these jobs as done
|
|
119 |
+ |
|
120 |
+ else:
|
|
121 |
+ operation_stage = OperationStage.QUEUED
|
|
122 |
+ job.update_lease_state(LeaseState.PENDING)
|
|
123 |
+ self.queue.append(job)
|
|
124 |
+ |
|
125 |
+ self._update_job_operation_stage(job_name, operation_stage)
|
|
82 | 126 |
|
83 | 127 |
def list_jobs(self):
|
84 | 128 |
return self.jobs.values()
|
... | ... | @@ -118,17 +162,27 @@ class Scheduler: |
118 | 162 |
lease_result (google.protobuf.Any): the lease execution result, only
|
119 | 163 |
required if `lease_state` is `COMPLETED`.
|
120 | 164 |
"""
|
121 |
- |
|
122 | 165 |
job = self.jobs[lease.id]
|
123 | 166 |
lease_state = LeaseState(lease.state)
|
124 | 167 |
|
168 |
+ operation_stage = None
|
|
125 | 169 |
if lease_state == LeaseState.PENDING:
|
126 | 170 |
job.update_lease_state(LeaseState.PENDING)
|
127 |
- job.update_operation_stage(OperationStage.QUEUED)
|
|
171 |
+ operation_stage = OperationStage.QUEUED
|
|
172 |
+ |
|
173 |
+ if self._is_instrumented:
|
|
174 |
+ self.__leases_by_state[LeaseState.PENDING].add(lease.id)
|
|
175 |
+ self.__leases_by_state[LeaseState.ACTIVE].discard(lease.id)
|
|
176 |
+ self.__leases_by_state[LeaseState.COMPLETED].discard(lease.id)
|
|
128 | 177 |
|
129 | 178 |
elif lease_state == LeaseState.ACTIVE:
|
130 | 179 |
job.update_lease_state(LeaseState.ACTIVE)
|
131 |
- job.update_operation_stage(OperationStage.EXECUTING)
|
|
180 |
+ operation_stage = OperationStage.EXECUTING
|
|
181 |
+ |
|
182 |
+ if self._is_instrumented:
|
|
183 |
+ self.__leases_by_state[LeaseState.PENDING].discard(lease.id)
|
|
184 |
+ self.__leases_by_state[LeaseState.ACTIVE].add(lease.id)
|
|
185 |
+ self.__leases_by_state[LeaseState.COMPLETED].discard(lease.id)
|
|
132 | 186 |
|
133 | 187 |
elif lease_state == LeaseState.COMPLETED:
|
134 | 188 |
job.update_lease_state(LeaseState.COMPLETED,
|
... | ... | @@ -137,7 +191,14 @@ class Scheduler: |
137 | 191 |
if self._action_cache is not None and not job.do_not_cache:
|
138 | 192 |
self._action_cache.update_action_result(job.action_digest, job.action_result)
|
139 | 193 |
|
140 |
- job.update_operation_stage(OperationStage.COMPLETED)
|
|
194 |
+ operation_stage = OperationStage.COMPLETED
|
|
195 |
+ |
|
196 |
+ if self._is_instrumented:
|
|
197 |
+ self.__leases_by_state[LeaseState.PENDING].discard(lease.id)
|
|
198 |
+ self.__leases_by_state[LeaseState.ACTIVE].discard(lease.id)
|
|
199 |
+ self.__leases_by_state[LeaseState.COMPLETED].add(lease.id)
|
|
200 |
+ |
|
201 |
+ self._update_job_operation_stage(lease.id, operation_stage)
|
|
141 | 202 |
|
142 | 203 |
def get_job_lease(self, job_name):
|
143 | 204 |
"""Returns the lease associated to job, if any have been emitted yet."""
|
... | ... | @@ -160,3 +221,101 @@ class Scheduler: |
160 | 221 |
job_name (str): name of the job holding the operation to cancel.
|
161 | 222 |
"""
|
162 | 223 |
self.jobs[job_name].cancel_operation()
|
224 |
+ |
|
225 |
+ # --- Public API: Monitoring ---
|
|
226 |
+ |
|
227 |
+ @property
|
|
228 |
+ def is_instrumented(self):
|
|
229 |
+ return self._is_instrumented
|
|
230 |
+ |
|
231 |
+ def query_n_jobs(self):
|
|
232 |
+ return len(self.jobs)
|
|
233 |
+ |
|
234 |
+ def query_n_operations(self):
|
|
235 |
+ # For now n_operations == n_jobs:
|
|
236 |
+ return len(self.jobs)
|
|
237 |
+ |
|
238 |
+ def query_n_operations_by_stage(self, operation_stage):
|
|
239 |
+ try:
|
|
240 |
+ if self.__operations_by_stage is not None:
|
|
241 |
+ return len(self.__operations_by_stage[operation_stage])
|
|
242 |
+ except KeyError:
|
|
243 |
+ pass
|
|
244 |
+ return 0
|
|
245 |
+ |
|
246 |
+ def query_n_leases(self):
|
|
247 |
+ return len(self.jobs)
|
|
248 |
+ |
|
249 |
+ def query_n_leases_by_state(self, lease_state):
|
|
250 |
+ try:
|
|
251 |
+ if self.__leases_by_state is not None:
|
|
252 |
+ return len(self.__leases_by_state[lease_state])
|
|
253 |
+ except KeyError:
|
|
254 |
+ pass
|
|
255 |
+ return 0
|
|
256 |
+ |
|
257 |
+ def query_n_retries(self):
|
|
258 |
+ return self.__retries_count
|
|
259 |
+ |
|
260 |
+ def query_am_queue_time(self):
|
|
261 |
+ if self.__queue_time_average is not None:
|
|
262 |
+ return self.__queue_time_average[1]
|
|
263 |
+ return timedelta()
|
|
264 |
+ |
|
265 |
+ # --- Private API ---
|
|
266 |
+ |
|
267 |
+ def _update_job_operation_stage(self, job_name, operation_stage):
|
|
268 |
+ """Requests a stage transition for the job's :class:Operations.
|
|
269 |
+ |
|
270 |
+ Args:
|
|
271 |
+ job_name (str): name of the job to query.
|
|
272 |
+ operation_stage (OperationStage): the stage to transition to.
|
|
273 |
+ """
|
|
274 |
+ job = self.jobs[job_name]
|
|
275 |
+ |
|
276 |
+ if operation_stage == OperationStage.CACHE_CHECK:
|
|
277 |
+ job.update_operation_stage(OperationStage.CACHE_CHECK)
|
|
278 |
+ |
|
279 |
+ if self._is_instrumented:
|
|
280 |
+ self.__operations_by_stage[OperationStage.CACHE_CHECK].add(job_name)
|
|
281 |
+ self.__operations_by_stage[OperationStage.QUEUED].discard(job_name)
|
|
282 |
+ self.__operations_by_stage[OperationStage.EXECUTING].discard(job_name)
|
|
283 |
+ self.__operations_by_stage[OperationStage.COMPLETED].discard(job_name)
|
|
284 |
+ |
|
285 |
+ elif operation_stage == OperationStage.QUEUED:
|
|
286 |
+ job.update_operation_stage(OperationStage.QUEUED)
|
|
287 |
+ |
|
288 |
+ if self._is_instrumented:
|
|
289 |
+ self.__operations_by_stage[OperationStage.CACHE_CHECK].discard(job_name)
|
|
290 |
+ self.__operations_by_stage[OperationStage.QUEUED].add(job_name)
|
|
291 |
+ self.__operations_by_stage[OperationStage.EXECUTING].discard(job_name)
|
|
292 |
+ self.__operations_by_stage[OperationStage.COMPLETED].discard(job_name)
|
|
293 |
+ |
|
294 |
+ elif operation_stage == OperationStage.EXECUTING:
|
|
295 |
+ job.update_operation_stage(OperationStage.EXECUTING)
|
|
296 |
+ |
|
297 |
+ if self._is_instrumented:
|
|
298 |
+ self.__operations_by_stage[OperationStage.CACHE_CHECK].discard(job_name)
|
|
299 |
+ self.__operations_by_stage[OperationStage.QUEUED].discard(job_name)
|
|
300 |
+ self.__operations_by_stage[OperationStage.EXECUTING].add(job_name)
|
|
301 |
+ self.__operations_by_stage[OperationStage.COMPLETED].discard(job_name)
|
|
302 |
+ |
|
303 |
+ elif operation_stage == OperationStage.COMPLETED:
|
|
304 |
+ job.update_operation_stage(OperationStage.COMPLETED)
|
|
305 |
+ |
|
306 |
+ if self._is_instrumented:
|
|
307 |
+ self.__operations_by_stage[OperationStage.CACHE_CHECK].discard(job_name)
|
|
308 |
+ self.__operations_by_stage[OperationStage.QUEUED].discard(job_name)
|
|
309 |
+ self.__operations_by_stage[OperationStage.EXECUTING].discard(job_name)
|
|
310 |
+ self.__operations_by_stage[OperationStage.COMPLETED].add(job_name)
|
|
311 |
+ |
|
312 |
+ average_order, average_time = self.__queue_time_average
|
|
313 |
+ |
|
314 |
+ average_order += 1
|
|
315 |
+ if average_order <= 1:
|
|
316 |
+ average_time = job.query_queue_time()
|
|
317 |
+ else:
|
|
318 |
+ queue_time = job.query_queue_time()
|
|
319 |
+ average_time = average_time + ((queue_time - average_time) / average_order)
|
|
320 |
+ |
|
321 |
+ self.__queue_time_average = average_order, average_time
|
1 |
+# Copyright (C) 2018 Bloomberg LP
|
|
2 |
+#
|
|
3 |
+# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4 |
+# you may not use this file except in compliance with the License.
|
|
5 |
+# You may obtain a copy of the License at
|
|
6 |
+#
|
|
7 |
+# <http://www.apache.org/licenses/LICENSE-2.0>
|
|
8 |
+#
|
|
9 |
+# Unless required by applicable law or agreed to in writing, software
|
|
10 |
+# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11 |
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12 |
+# See the License for the specific language governing permissions and
|
|
13 |
+# limitations under the License.
|
|
14 |
+ |
|
15 |
+ |
|
1 | 16 |
import hashlib
|
2 | 17 |
|
3 | 18 |
|
4 |
-# The hash function that CAS uses
|
|
19 |
+# Hash function used for computing digests:
|
|
5 | 20 |
HASH = hashlib.sha256
|
21 |
+ |
|
22 |
+# Length, in characters, of a hexadecimal digest string returned by HASH:
|
|
6 | 23 |
HASH_LENGTH = HASH().digest_size * 2
|
24 |
+ |
|
25 |
+# Period, in seconds, for the monitoring cycle:
|
|
26 |
+MONITORING_PERIOD = 5.0
|
... | ... | @@ -112,13 +112,16 @@ setup( |
112 | 112 |
license="Apache License, Version 2.0",
|
113 | 113 |
description="A remote execution service",
|
114 | 114 |
packages=find_packages(),
|
115 |
+ python_requires='>= 3.5.3', # janus requirement
|
|
115 | 116 |
install_requires=[
|
116 |
- 'protobuf',
|
|
117 |
- 'grpcio',
|
|
118 |
- 'Click',
|
|
119 |
- 'PyYAML',
|
|
120 | 117 |
'boto3 < 1.8.0',
|
121 | 118 |
'botocore < 1.11.0',
|
119 |
+ 'click',
|
|
120 |
+ 'grpcio',
|
|
121 |
+ 'janus',
|
|
122 |
+ 'protobuf',
|
|
123 |
+ 'pyjwt',
|
|
124 |
+ 'pyyaml',
|
|
122 | 125 |
],
|
123 | 126 |
entry_points={
|
124 | 127 |
'console_scripts': [
|