Raoul Hidalgo Charman pushed to branch raoul/smarter-bot-calls at BuildGrid / buildgrid
Commits:
-
859c0fa8
by Finn at 2018-11-27T15:25:25Z
-
e1091b04
by Finn at 2018-11-27T15:25:25Z
-
1dc5d2d2
by Finn at 2018-11-27T15:25:25Z
-
35c901bd
by Finn at 2018-11-27T16:23:49Z
-
5cb38e2a
by Finn at 2018-11-27T16:23:52Z
-
32ad653d
by Finn at 2018-11-27T16:23:52Z
-
e15a9c91
by Finn at 2018-11-27T16:23:52Z
-
bd5587ea
by Finn at 2018-11-27T16:23:52Z
-
d94fa258
by Finn at 2018-11-27T16:23:52Z
-
6f90a553
by Finn at 2018-11-27T16:23:52Z
-
db65c5ec
by Raoul Hidalgo Charman at 2018-11-28T12:23:41Z
-
db53ffbc
by Martin Blanchard at 2018-11-29T08:59:48Z
-
5ecfb7f8
by Martin Blanchard at 2018-11-29T08:59:48Z
-
df5b6a80
by Martin Blanchard at 2018-11-29T08:59:48Z
-
5e608d6b
by Martin Blanchard at 2018-11-29T08:59:48Z
-
c167a1d0
by Martin Blanchard at 2018-11-29T08:59:48Z
-
8fc6d17d
by Martin Blanchard at 2018-11-29T08:59:48Z
-
397f385b
by Martin Blanchard at 2018-11-29T08:59:48Z
-
dbbcdb50
by Martin Blanchard at 2018-11-29T08:59:48Z
-
50f3f63b
by Martin Blanchard at 2018-11-29T08:59:48Z
-
b15b1d10
by Raoul Hidalgo Charman at 2018-11-29T16:38:20Z
-
4a6e7ed6
by Raoul Hidalgo Charman at 2018-11-29T16:38:25Z
-
650612b4
by Raoul Hidalgo Charman at 2018-11-29T16:38:25Z
30 changed files:
- .pylintrc
- buildgrid/_app/commands/cmd_bot.py
- + buildgrid/_app/commands/cmd_capabilities.py
- buildgrid/bot/bot.py
- buildgrid/bot/interface.py
- buildgrid/bot/session.py
- buildgrid/bot/tenantmanager.py
- + buildgrid/client/capabilities.py
- buildgrid/server/bots/instance.py
- buildgrid/server/bots/service.py
- + buildgrid/server/capabilities/__init__.py
- + buildgrid/server/capabilities/instance.py
- + buildgrid/server/capabilities/service.py
- buildgrid/server/cas/instance.py
- buildgrid/server/execution/instance.py
- buildgrid/server/execution/service.py
- buildgrid/server/instance.py
- buildgrid/server/job.py
- buildgrid/server/operations/instance.py
- buildgrid/server/operations/service.py
- buildgrid/server/scheduler.py
- buildgrid/settings.py
- buildgrid/utils.py
- setup.py
- tests/cas/test_storage.py
- tests/integration/bot_session.py
- tests/integration/bots_service.py
- + tests/integration/capabilities_service.py
- tests/utils/bots_interface.py
- + tests/utils/capabilities.py
Changes:
... | ... | @@ -185,6 +185,7 @@ ignore-on-opaque-inference=yes |
185 | 185 |
# for classes with dynamically set attributes). This supports the use of
|
186 | 186 |
# qualified names.
|
187 | 187 |
ignored-classes=google.protobuf.any_pb2.Any,
|
188 |
+ google.protobuf.duration_pb2.Duration,
|
|
188 | 189 |
google.protobuf.timestamp_pb2.Timestamp
|
189 | 190 |
|
190 | 191 |
# List of module names for which member attributes should not be checked
|
... | ... | @@ -460,6 +461,7 @@ known-third-party=boto3, |
460 | 461 |
enchant,
|
461 | 462 |
google,
|
462 | 463 |
grpc,
|
464 |
+ janus,
|
|
463 | 465 |
moto,
|
464 | 466 |
yaml
|
465 | 467 |
|
... | ... | @@ -35,6 +35,7 @@ from buildgrid.bot.hardware.worker import Worker |
35 | 35 |
|
36 | 36 |
from ..bots import buildbox, dummy, host
|
37 | 37 |
from ..cli import pass_context
|
38 |
+from ...settings import INTERVAL_BUFFER
|
|
38 | 39 |
|
39 | 40 |
|
40 | 41 |
@click.group(name='bot', short_help="Create and register bot clients.")
|
... | ... | @@ -54,7 +55,7 @@ from ..cli import pass_context |
54 | 55 |
help="Public CAS client certificate for TLS (PEM-encoded)")
|
55 | 56 |
@click.option('--cas-server-cert', type=click.Path(exists=True, dir_okay=False), default=None,
|
56 | 57 |
help="Public CAS server certificate for TLS (PEM-encoded)")
|
57 |
-@click.option('--update-period', type=click.FLOAT, default=0.5, show_default=True,
|
|
58 |
+@click.option('--update-period', type=click.FLOAT, default=30, show_default=True,
|
|
58 | 59 |
help="Time period for bot updates to the server in seconds.")
|
59 | 60 |
@click.option('--parent', type=click.STRING, default='main', show_default=True,
|
60 | 61 |
help="Targeted farm resource.")
|
... | ... | @@ -66,7 +67,6 @@ def cli(context, parent, update_period, remote, client_key, client_cert, server_ |
66 | 67 |
|
67 | 68 |
context.remote = '{}:{}'.format(url.hostname, url.port or 50051)
|
68 | 69 |
context.remote_url = remote
|
69 |
- context.update_period = update_period
|
|
70 | 70 |
context.parent = parent
|
71 | 71 |
|
72 | 72 |
if url.scheme == 'http':
|
... | ... | @@ -124,7 +124,7 @@ def cli(context, parent, update_period, remote, client_key, client_cert, server_ |
124 | 124 |
|
125 | 125 |
click.echo("Starting for remote=[{}]".format(context.remote))
|
126 | 126 |
|
127 |
- bot_interface = interface.BotInterface(context.channel)
|
|
127 |
+ bot_interface = interface.BotInterface(context.channel, update_period + INTERVAL_BUFFER)
|
|
128 | 128 |
worker = Worker()
|
129 | 129 |
worker.add_device(Device())
|
130 | 130 |
hardware_interface = HardwareInterface(worker)
|
... | ... | @@ -142,7 +142,7 @@ def run_dummy(context): |
142 | 142 |
try:
|
143 | 143 |
bot_session = session.BotSession(context.parent, context.bot_interface, context.hardware_interface,
|
144 | 144 |
dummy.work_dummy, context)
|
145 |
- b = bot.Bot(bot_session, context.update_period)
|
|
145 |
+ b = bot.Bot(bot_session)
|
|
146 | 146 |
b.session()
|
147 | 147 |
except KeyboardInterrupt:
|
148 | 148 |
pass
|
... | ... | @@ -158,7 +158,7 @@ def run_host_tools(context): |
158 | 158 |
try:
|
159 | 159 |
bot_session = session.BotSession(context.parent, context.bot_interface, context.hardware_interface,
|
160 | 160 |
host.work_host_tools, context)
|
161 |
- b = bot.Bot(bot_session, context.update_period)
|
|
161 |
+ b = bot.Bot(bot_session)
|
|
162 | 162 |
b.session()
|
163 | 163 |
except KeyboardInterrupt:
|
164 | 164 |
pass
|
... | ... | @@ -180,7 +180,7 @@ def run_buildbox(context, local_cas, fuse_dir): |
180 | 180 |
try:
|
181 | 181 |
bot_session = session.BotSession(context.parent, context.bot_interface, context.hardware_interface,
|
182 | 182 |
buildbox.work_buildbox, context)
|
183 |
- b = bot.Bot(bot_session, context.update_period)
|
|
183 |
+ b = bot.Bot(bot_session)
|
|
184 | 184 |
b.session()
|
185 | 185 |
except KeyboardInterrupt:
|
186 | 186 |
pass
|
1 |
+# Copyright (C) 2018 Bloomberg LP
|
|
2 |
+#
|
|
3 |
+# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4 |
+# you may not use this file except in compliance with the License.
|
|
5 |
+# You may obtain a copy of the License at
|
|
6 |
+#
|
|
7 |
+# <http://www.apache.org/licenses/LICENSE-2.0>
|
|
8 |
+#
|
|
9 |
+# Unless required by applicable law or agreed to in writing, software
|
|
10 |
+# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11 |
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12 |
+# See the License for the specific language governing permissions and
|
|
13 |
+# limitations under the License.
|
|
14 |
+ |
|
15 |
+ |
|
16 |
+import sys
|
|
17 |
+from urllib.parse import urlparse
|
|
18 |
+ |
|
19 |
+import click
|
|
20 |
+import grpc
|
|
21 |
+ |
|
22 |
+from buildgrid.client.capabilities import CapabilitiesInterface
|
|
23 |
+ |
|
24 |
+from ..cli import pass_context
|
|
25 |
+ |
|
26 |
+ |
|
27 |
+@click.command(name='capabilities', short_help="Capabilities service.")
|
|
28 |
+@click.option('--remote', type=click.STRING, default='http://localhost:50051', show_default=True,
|
|
29 |
+ help="Remote execution server's URL (port defaults to 50051 if no specified).")
|
|
30 |
+@click.option('--client-key', type=click.Path(exists=True, dir_okay=False), default=None,
|
|
31 |
+ help="Private client key for TLS (PEM-encoded)")
|
|
32 |
+@click.option('--client-cert', type=click.Path(exists=True, dir_okay=False), default=None,
|
|
33 |
+ help="Public client certificate for TLS (PEM-encoded)")
|
|
34 |
+@click.option('--server-cert', type=click.Path(exists=True, dir_okay=False), default=None,
|
|
35 |
+ help="Public server certificate for TLS (PEM-encoded)")
|
|
36 |
+@click.option('--instance-name', type=click.STRING, default='main', show_default=True,
|
|
37 |
+ help="Targeted farm instance name.")
|
|
38 |
+@pass_context
|
|
39 |
+def cli(context, remote, instance_name, client_key, client_cert, server_cert):
|
|
40 |
+ click.echo("Getting capabilities...")
|
|
41 |
+ url = urlparse(remote)
|
|
42 |
+ |
|
43 |
+ remote = '{}:{}'.format(url.hostname, url.port or 50051)
|
|
44 |
+ instance_name = instance_name
|
|
45 |
+ |
|
46 |
+ if url.scheme == 'http':
|
|
47 |
+ channel = grpc.insecure_channel(remote)
|
|
48 |
+ else:
|
|
49 |
+ credentials = context.load_client_credentials(client_key, client_cert, server_cert)
|
|
50 |
+ if not credentials:
|
|
51 |
+ click.echo("ERROR: no TLS keys were specified and no defaults could be found.", err=True)
|
|
52 |
+ sys.exit(-1)
|
|
53 |
+ |
|
54 |
+ channel = grpc.secure_channel(remote, credentials)
|
|
55 |
+ |
|
56 |
+ interface = CapabilitiesInterface(channel)
|
|
57 |
+ response = interface.get_capabilities(instance_name)
|
|
58 |
+ click.echo(response)
|
... | ... | @@ -20,14 +20,12 @@ import logging |
20 | 20 |
class Bot:
|
21 | 21 |
"""Creates a local BotSession."""
|
22 | 22 |
|
23 |
- def __init__(self, bot_session, update_period=1):
|
|
23 |
+ def __init__(self, bot_session):
|
|
24 | 24 |
"""
|
25 | 25 |
"""
|
26 | 26 |
self.__logger = logging.getLogger(__name__)
|
27 | 27 |
|
28 | 28 |
self.__bot_session = bot_session
|
29 |
- self.__update_period = update_period
|
|
30 |
- |
|
31 | 29 |
self.__loop = None
|
32 | 30 |
|
33 | 31 |
def session(self):
|
... | ... | @@ -37,7 +35,7 @@ class Bot: |
37 | 35 |
self.__bot_session.create_bot_session()
|
38 | 36 |
|
39 | 37 |
try:
|
40 |
- task = asyncio.ensure_future(self.__update_bot_session())
|
|
38 |
+ task = asyncio.ensure_future(self.__bot_session.run())
|
|
41 | 39 |
self.__loop.run_until_complete(task)
|
42 | 40 |
|
43 | 41 |
except KeyboardInterrupt:
|
... | ... | @@ -46,16 +44,6 @@ class Bot: |
46 | 44 |
self.__kill_everyone()
|
47 | 45 |
self.__logger.info("Bot shutdown.")
|
48 | 46 |
|
49 |
- async def __update_bot_session(self):
|
|
50 |
- """Calls the server periodically to inform the server the client has not died."""
|
|
51 |
- try:
|
|
52 |
- while True:
|
|
53 |
- self.__bot_session.update_bot_session()
|
|
54 |
- await asyncio.sleep(self.__update_period)
|
|
55 |
- |
|
56 |
- except asyncio.CancelledError:
|
|
57 |
- pass
|
|
58 |
- |
|
59 | 47 |
def __kill_everyone(self):
|
60 | 48 |
"""Cancels and waits for them to stop."""
|
61 | 49 |
self.__logger.info("Cancelling remaining tasks...")
|
... | ... | @@ -31,28 +31,27 @@ class BotInterface: |
31 | 31 |
Interface handles calls to the server.
|
32 | 32 |
"""
|
33 | 33 |
|
34 |
- def __init__(self, channel):
|
|
34 |
+ def __init__(self, channel, interval):
|
|
35 | 35 |
self.__logger = logging.getLogger(__name__)
|
36 | 36 |
|
37 |
+ self.__logger.info(channel)
|
|
37 | 38 |
self._stub = bots_pb2_grpc.BotsStub(channel)
|
39 |
+ self.interval = interval
|
|
38 | 40 |
|
39 | 41 |
def create_bot_session(self, parent, bot_session):
|
40 | 42 |
request = bots_pb2.CreateBotSessionRequest(parent=parent,
|
41 | 43 |
bot_session=bot_session)
|
42 |
- try:
|
|
43 |
- return self._stub.CreateBotSession(request)
|
|
44 |
- |
|
45 |
- except grpc.RpcError as e:
|
|
46 |
- self.__logger.error(e)
|
|
47 |
- raise
|
|
44 |
+ return self._bot_call(self._stub.CreateBotSession, request)
|
|
48 | 45 |
|
49 | 46 |
def update_bot_session(self, bot_session, update_mask=None):
|
50 | 47 |
request = bots_pb2.UpdateBotSessionRequest(name=bot_session.name,
|
51 | 48 |
bot_session=bot_session,
|
52 | 49 |
update_mask=update_mask)
|
53 |
- try:
|
|
54 |
- return self._stub.UpdateBotSession(request)
|
|
50 |
+ return self._bot_call(self._stub.UpdateBotSession, request)
|
|
55 | 51 |
|
52 |
+ def _bot_call(self, call, request):
|
|
53 |
+ try:
|
|
54 |
+ return call(request, timeout=self.interval)
|
|
56 | 55 |
except grpc.RpcError as e:
|
57 | 56 |
self.__logger.error(e)
|
58 | 57 |
raise
|
... | ... | @@ -19,6 +19,7 @@ Bot Session |
19 | 19 |
|
20 | 20 |
Allows connections
|
21 | 21 |
"""
|
22 |
+import asyncio
|
|
22 | 23 |
import logging
|
23 | 24 |
import platform
|
24 | 25 |
|
... | ... | @@ -47,6 +48,8 @@ class BotSession: |
47 | 48 |
self._status = BotStatus.OK.value
|
48 | 49 |
self._tenant_manager = TenantManager()
|
49 | 50 |
|
51 |
+ self.connected = False
|
|
52 |
+ |
|
50 | 53 |
self.__parent = parent
|
51 | 54 |
self.__bot_id = '{}.{}'.format(parent, platform.node())
|
52 | 55 |
self.__name = None
|
... | ... | @@ -58,10 +61,33 @@ class BotSession: |
58 | 61 |
def bot_id(self):
|
59 | 62 |
return self.__bot_id
|
60 | 63 |
|
64 |
+ async def run(self):
|
|
65 |
+ """ Run a bot session
|
|
66 |
+ |
|
67 |
+ This connects and reconnects via create bot session and waits on update
|
|
68 |
+ bot session calls.
|
|
69 |
+ """
|
|
70 |
+ self.__logger.debug("Starting bot session")
|
|
71 |
+ interval = self._bots_interface.interval
|
|
72 |
+ while True:
|
|
73 |
+ if self.connected is False:
|
|
74 |
+ self.create_bot_session()
|
|
75 |
+ else:
|
|
76 |
+ self.update_bot_session()
|
|
77 |
+ |
|
78 |
+ if self.connected is False:
|
|
79 |
+ await asyncio.sleep(interval)
|
|
80 |
+ else:
|
|
81 |
+ await self._tenant_manager.wait_on_tenants(interval)
|
|
82 |
+ |
|
61 | 83 |
def create_bot_session(self):
|
62 | 84 |
self.__logger.debug("Creating bot session")
|
63 | 85 |
|
64 | 86 |
session = self._bots_interface.create_bot_session(self.__parent, self.get_pb2())
|
87 |
+ if session is None:
|
|
88 |
+ self.connected = False
|
|
89 |
+ return
|
|
90 |
+ self.connected = True
|
|
65 | 91 |
self.__name = session.name
|
66 | 92 |
|
67 | 93 |
self.__logger.info("Created bot session with name: [%s]", self.__name)
|
... | ... | @@ -73,6 +99,10 @@ class BotSession: |
73 | 99 |
self.__logger.debug("Updating bot session: [%s]", self.__bot_id)
|
74 | 100 |
|
75 | 101 |
session = self._bots_interface.update_bot_session(self.get_pb2())
|
102 |
+ if session is None:
|
|
103 |
+ self.connected = False
|
|
104 |
+ return
|
|
105 |
+ self.connected = True
|
|
76 | 106 |
server_ids = []
|
77 | 107 |
|
78 | 108 |
for lease in session.leases:
|
... | ... | @@ -150,6 +150,14 @@ class TenantManager: |
150 | 150 |
"""
|
151 | 151 |
return self._tenants[lease_id].tenant_completed
|
152 | 152 |
|
153 |
+ async def wait_on_tenants(self, timeout):
|
|
154 |
+ if self._tasks:
|
|
155 |
+ tasks = self._tasks.values()
|
|
156 |
+ print(type(tasks))
|
|
157 |
+ await asyncio.wait(tasks,
|
|
158 |
+ timeout=timeout,
|
|
159 |
+ return_when=asyncio.FIRST_COMPLETED)
|
|
160 |
+ |
|
153 | 161 |
def _update_lease_result(self, lease_id, result):
|
154 | 162 |
"""Updates the lease with the result."""
|
155 | 163 |
self._tenants[lease_id].update_lease_result(result)
|
1 |
+# Copyright (C) 2018 Bloomberg LP
|
|
2 |
+#
|
|
3 |
+# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4 |
+# you may not use this file except in compliance with the License.
|
|
5 |
+# You may obtain a copy of the License at
|
|
6 |
+#
|
|
7 |
+# <http://www.apache.org/licenses/LICENSE-2.0>
|
|
8 |
+#
|
|
9 |
+# Unless required by applicable law or agreed to in writing, software
|
|
10 |
+# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11 |
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12 |
+# See the License for the specific language governing permissions and
|
|
13 |
+# limitations under the License.
|
|
14 |
+ |
|
15 |
+ |
|
16 |
+import logging
|
|
17 |
+import grpc
|
|
18 |
+ |
|
19 |
+from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
|
|
20 |
+ |
|
21 |
+ |
|
22 |
+class CapabilitiesInterface:
|
|
23 |
+ """Interface for calls to the Capabilities Service."""
|
|
24 |
+ |
|
25 |
+ def __init__(self, channel):
|
|
26 |
+ """Initialises an instance of the capabilities service.
|
|
27 |
+ |
|
28 |
+ Args:
|
|
29 |
+ channel (grpc.Channel): A gRPC channel to the CAS endpoint.
|
|
30 |
+ """
|
|
31 |
+ self.__logger = logging.getLogger(__name__)
|
|
32 |
+ self.__stub = remote_execution_pb2_grpc.CapabilitiesStub(channel)
|
|
33 |
+ |
|
34 |
+ def get_capabilities(self, instance_name):
|
|
35 |
+ """Returns the capabilities of the server to the user.
|
|
36 |
+ |
|
37 |
+ Args:
|
|
38 |
+ instance_name (str): The name of the instance."""
|
|
39 |
+ |
|
40 |
+ request = remote_execution_pb2.GetCapabilitiesRequest(instance_name=instance_name)
|
|
41 |
+ try:
|
|
42 |
+ return self.__stub.GetCapabilities(request)
|
|
43 |
+ |
|
44 |
+ except grpc.RpcError as e:
|
|
45 |
+ self.__logger.error(e)
|
|
46 |
+ raise
|
... | ... | @@ -26,6 +26,7 @@ import uuid |
26 | 26 |
from buildgrid._exceptions import InvalidArgumentError
|
27 | 27 |
|
28 | 28 |
from ..job import LeaseState
|
29 |
+from ...settings import INTERVAL_BUFFER
|
|
29 | 30 |
|
30 | 31 |
|
31 | 32 |
class BotsInterface:
|
... | ... | @@ -37,6 +38,10 @@ class BotsInterface: |
37 | 38 |
self._assigned_leases = {}
|
38 | 39 |
self._scheduler = scheduler
|
39 | 40 |
|
41 |
+ @property
|
|
42 |
+ def scheduler(self):
|
|
43 |
+ return self._scheduler
|
|
44 |
+ |
|
40 | 45 |
def register_instance_with_server(self, instance_name, server):
|
41 | 46 |
server.add_bots_interface(self, instance_name)
|
42 | 47 |
|
... | ... | @@ -71,7 +76,7 @@ class BotsInterface: |
71 | 76 |
self._request_leases(bot_session)
|
72 | 77 |
return bot_session
|
73 | 78 |
|
74 |
- def update_bot_session(self, name, bot_session):
|
|
79 |
+ def update_bot_session(self, name, bot_session, deadline=None):
|
|
75 | 80 |
""" Client updates the server. Any changes in state to the Lease should be
|
76 | 81 |
registered server side. Assigns available leases with work.
|
77 | 82 |
"""
|
... | ... | @@ -89,14 +94,15 @@ class BotsInterface: |
89 | 94 |
pass
|
90 | 95 |
lease.Clear()
|
91 | 96 |
|
92 |
- self._request_leases(bot_session)
|
|
97 |
+ self._request_leases(bot_session, deadline)
|
|
93 | 98 |
return bot_session
|
94 | 99 |
|
95 |
- def _request_leases(self, bot_session):
|
|
100 |
+ def _request_leases(self, bot_session, deadline=None):
|
|
96 | 101 |
# TODO: Send worker capabilities to the scheduler!
|
97 | 102 |
# Only send one lease at a time currently.
|
98 | 103 |
if not bot_session.leases:
|
99 |
- leases = self._scheduler.request_job_leases({})
|
|
104 |
+ leases = self._scheduler.request_job_leases(
|
|
105 |
+ {}, timeout=deadline - INTERVAL_BUFFER if deadline else None)
|
|
100 | 106 |
if leases:
|
101 | 107 |
for lease in leases:
|
102 | 108 |
self._assigned_leases[bot_session.name].add(lease.id)
|
... | ... | @@ -23,8 +23,9 @@ import logging |
23 | 23 |
|
24 | 24 |
import grpc
|
25 | 25 |
|
26 |
-from google.protobuf.empty_pb2 import Empty
|
|
26 |
+from google.protobuf import empty_pb2, timestamp_pb2
|
|
27 | 27 |
|
28 |
+from buildgrid._enums import BotStatus
|
|
28 | 29 |
from buildgrid._exceptions import InvalidArgumentError, OutOfSyncError
|
29 | 30 |
from buildgrid._protos.google.devtools.remoteworkers.v1test2 import bots_pb2
|
30 | 31 |
from buildgrid._protos.google.devtools.remoteworkers.v1test2 import bots_pb2_grpc
|
... | ... | @@ -32,24 +33,86 @@ from buildgrid._protos.google.devtools.remoteworkers.v1test2 import bots_pb2_grp |
32 | 33 |
|
33 | 34 |
class BotsService(bots_pb2_grpc.BotsServicer):
|
34 | 35 |
|
35 |
- def __init__(self, server):
|
|
36 |
+ def __init__(self, server, monitor=False):
|
|
36 | 37 |
self.__logger = logging.getLogger(__name__)
|
37 | 38 |
|
39 |
+ self.__bots_by_status = None
|
|
40 |
+ self.__bots_by_instance = None
|
|
41 |
+ self.__bots = None
|
|
42 |
+ |
|
38 | 43 |
self._instances = {}
|
39 | 44 |
|
40 | 45 |
bots_pb2_grpc.add_BotsServicer_to_server(self, server)
|
41 | 46 |
|
42 |
- def add_instance(self, name, instance):
|
|
43 |
- self._instances[name] = instance
|
|
47 |
+ self._is_instrumented = monitor
|
|
48 |
+ |
|
49 |
+ if self._is_instrumented:
|
|
50 |
+ self.__bots_by_status = {}
|
|
51 |
+ self.__bots_by_instance = {}
|
|
52 |
+ self.__bots = {}
|
|
53 |
+ |
|
54 |
+ self.__bots_by_status[BotStatus.OK] = set()
|
|
55 |
+ self.__bots_by_status[BotStatus.UNHEALTHY] = set()
|
|
56 |
+ |
|
57 |
+ # --- Public API ---
|
|
58 |
+ |
|
59 |
+ def add_instance(self, instance_name, instance):
|
|
60 |
+ """Registers a new servicer instance.
|
|
61 |
+ |
|
62 |
+ Args:
|
|
63 |
+ instance_name (str): The new instance's name.
|
|
64 |
+ instance (BotsInterface): The new instance itself.
|
|
65 |
+ """
|
|
66 |
+ self._instances[instance_name] = instance
|
|
67 |
+ |
|
68 |
+ if self._is_instrumented:
|
|
69 |
+ self.__bots_by_instance[instance_name] = set()
|
|
70 |
+ |
|
71 |
+ def get_scheduler(self, instance_name):
|
|
72 |
+ """Retrieves a reference to the scheduler for an instance.
|
|
73 |
+ |
|
74 |
+ Args:
|
|
75 |
+ instance_name (str): The name of the instance to query.
|
|
76 |
+ |
|
77 |
+ Returns:
|
|
78 |
+ Scheduler: A reference to the scheduler for `instance_name`.
|
|
79 |
+ |
|
80 |
+ Raises:
|
|
81 |
+ InvalidArgumentError: If no instance named `instance_name` exists.
|
|
82 |
+ """
|
|
83 |
+ instance = self._get_instance(instance_name)
|
|
84 |
+ |
|
85 |
+ return instance.scheduler
|
|
86 |
+ |
|
87 |
+ # --- Public API: Servicer ---
|
|
44 | 88 |
|
45 | 89 |
def CreateBotSession(self, request, context):
|
90 |
+ """Handles CreateBotSessionRequest messages.
|
|
91 |
+ |
|
92 |
+ Args:
|
|
93 |
+ request (CreateBotSessionRequest): The incoming RPC request.
|
|
94 |
+ context (grpc.ServicerContext): Context for the RPC call.
|
|
95 |
+ """
|
|
46 | 96 |
self.__logger.debug("CreateBotSession request from [%s]", context.peer())
|
47 | 97 |
|
98 |
+ instance_name = request.parent
|
|
99 |
+ bot_status = BotStatus(request.bot_session.status)
|
|
100 |
+ bot_id = request.bot_session.bot_id
|
|
101 |
+ |
|
48 | 102 |
try:
|
49 |
- parent = request.parent
|
|
50 |
- instance = self._get_instance(request.parent)
|
|
51 |
- return instance.create_bot_session(parent,
|
|
52 |
- request.bot_session)
|
|
103 |
+ instance = self._get_instance(instance_name)
|
|
104 |
+ bot_session = instance.create_bot_session(instance_name,
|
|
105 |
+ request.bot_session)
|
|
106 |
+ now = timestamp_pb2.Timestamp()
|
|
107 |
+ now.GetCurrentTime()
|
|
108 |
+ |
|
109 |
+ if self._is_instrumented:
|
|
110 |
+ self.__bots[bot_id] = now
|
|
111 |
+ self.__bots_by_instance[instance_name].add(bot_id)
|
|
112 |
+ if bot_status in self.__bots_by_status:
|
|
113 |
+ self.__bots_by_status[bot_status].add(bot_id)
|
|
114 |
+ |
|
115 |
+ return bot_session
|
|
53 | 116 |
|
54 | 117 |
except InvalidArgumentError as e:
|
55 | 118 |
self.__logger.error(e)
|
... | ... | @@ -59,17 +122,43 @@ class BotsService(bots_pb2_grpc.BotsServicer): |
59 | 122 |
return bots_pb2.BotSession()
|
60 | 123 |
|
61 | 124 |
def UpdateBotSession(self, request, context):
|
125 |
+ """Handles UpdateBotSessionRequest messages.
|
|
126 |
+ |
|
127 |
+ Args:
|
|
128 |
+ request (UpdateBotSessionRequest): The incoming RPC request.
|
|
129 |
+ context (grpc.ServicerContext): Context for the RPC call.
|
|
130 |
+ """
|
|
62 | 131 |
self.__logger.debug("UpdateBotSession request from [%s]", context.peer())
|
63 | 132 |
|
133 |
+ names = request.name.split("/")
|
|
134 |
+ bot_status = BotStatus(request.bot_session.status)
|
|
135 |
+ bot_id = request.bot_session.bot_id
|
|
136 |
+ |
|
64 | 137 |
try:
|
65 |
- names = request.name.split("/")
|
|
66 |
- # Operation name should be in format:
|
|
67 |
- # {instance/name}/{uuid}
|
|
68 |
- instance_name = ''.join(names[0:-1])
|
|
138 |
+ instance_name = '/'.join(names[:-1])
|
|
69 | 139 |
|
70 | 140 |
instance = self._get_instance(instance_name)
|
71 |
- return instance.update_bot_session(request.name,
|
|
72 |
- request.bot_session)
|
|
141 |
+ bot_session = instance.update_bot_session(
|
|
142 |
+ request.name,
|
|
143 |
+ request.bot_session,
|
|
144 |
+ deadline=context.time_remaining())
|
|
145 |
+ |
|
146 |
+ if self._is_instrumented:
|
|
147 |
+ self.__bots[bot_id].GetCurrentTime()
|
|
148 |
+ if bot_id not in self.__bots_by_status[bot_status]:
|
|
149 |
+ if bot_status == BotStatus.OK:
|
|
150 |
+ self.__bots_by_status[BotStatus.OK].add(bot_id)
|
|
151 |
+ self.__bots_by_status[BotStatus.UNHEALTHY].discard(bot_id)
|
|
152 |
+ |
|
153 |
+ elif bot_status == BotStatus.UNHEALTHY:
|
|
154 |
+ self.__bots_by_status[BotStatus.OK].discard(bot_id)
|
|
155 |
+ self.__bots_by_status[BotStatus.UNHEALTHY].add(bot_id)
|
|
156 |
+ |
|
157 |
+ else:
|
|
158 |
+ self.__bots_by_instance[instance_name].remove(bot_id)
|
|
159 |
+ del self.__bots[bot_id]
|
|
160 |
+ |
|
161 |
+ return bot_session
|
|
73 | 162 |
|
74 | 163 |
except InvalidArgumentError as e:
|
75 | 164 |
self.__logger.error(e)
|
... | ... | @@ -89,10 +178,47 @@ class BotsService(bots_pb2_grpc.BotsServicer): |
89 | 178 |
return bots_pb2.BotSession()
|
90 | 179 |
|
91 | 180 |
def PostBotEventTemp(self, request, context):
|
181 |
+ """Handles PostBotEventTempRequest messages.
|
|
182 |
+ |
|
183 |
+ Args:
|
|
184 |
+ request (PostBotEventTempRequest): The incoming RPC request.
|
|
185 |
+ context (grpc.ServicerContext): Context for the RPC call.
|
|
186 |
+ """
|
|
92 | 187 |
self.__logger.debug("PostBotEventTemp request from [%s]", context.peer())
|
93 | 188 |
|
94 | 189 |
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
95 |
- return Empty()
|
|
190 |
+ |
|
191 |
+ return empty_pb2.Empty()
|
|
192 |
+ |
|
193 |
+ # --- Public API: Monitoring ---
|
|
194 |
+ |
|
195 |
+ @property
|
|
196 |
+ def is_instrumented(self):
|
|
197 |
+ return self._is_instrumented
|
|
198 |
+ |
|
199 |
+ def query_n_bots(self):
|
|
200 |
+ if self.__bots is not None:
|
|
201 |
+ return len(self.__bots)
|
|
202 |
+ |
|
203 |
+ return 0
|
|
204 |
+ |
|
205 |
+ def query_n_bots_for_instance(self, instance_name):
|
|
206 |
+ try:
|
|
207 |
+ if self.__bots_by_instance is not None:
|
|
208 |
+ return len(self.__bots_by_instance[instance_name])
|
|
209 |
+ except KeyError:
|
|
210 |
+ pass
|
|
211 |
+ return 0
|
|
212 |
+ |
|
213 |
+ def query_n_bots_for_status(self, bot_status):
|
|
214 |
+ try:
|
|
215 |
+ if self.__bots_by_status is not None:
|
|
216 |
+ return len(self.__bots_by_status[bot_status])
|
|
217 |
+ except KeyError:
|
|
218 |
+ pass
|
|
219 |
+ return 0
|
|
220 |
+ |
|
221 |
+ # --- Private API ---
|
|
96 | 222 |
|
97 | 223 |
def _get_instance(self, name):
|
98 | 224 |
try:
|
1 |
+# Copyright (C) 2018 Bloomberg LP
|
|
2 |
+#
|
|
3 |
+# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4 |
+# you may not use this file except in compliance with the License.
|
|
5 |
+# You may obtain a copy of the License at
|
|
6 |
+#
|
|
7 |
+# <http://www.apache.org/licenses/LICENSE-2.0>
|
|
8 |
+#
|
|
9 |
+# Unless required by applicable law or agreed to in writing, software
|
|
10 |
+# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11 |
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12 |
+# See the License for the specific language governing permissions and
|
|
13 |
+# limitations under the License.
|
|
14 |
+ |
|
15 |
+ |
|
16 |
+import logging
|
|
17 |
+ |
|
18 |
+from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
|
|
19 |
+ |
|
20 |
+ |
|
21 |
+class CapabilitiesInstance:
|
|
22 |
+ |
|
23 |
+ def __init__(self, cas_instance=None, action_cache_instance=None, execution_instance=None):
|
|
24 |
+ self.__logger = logging.getLogger(__name__)
|
|
25 |
+ self.__cas_instance = cas_instance
|
|
26 |
+ self.__action_cache_instance = action_cache_instance
|
|
27 |
+ self.__execution_instance = execution_instance
|
|
28 |
+ |
|
29 |
+ def register_instance_with_server(self, instance_name, server):
|
|
30 |
+ server.add_capabilities_instance(self, instance_name)
|
|
31 |
+ |
|
32 |
+ def add_cas_instance(self, cas_instance):
|
|
33 |
+ self.__cas_instance = cas_instance
|
|
34 |
+ |
|
35 |
+ def add_action_cache_instance(self, action_cache_instance):
|
|
36 |
+ self.__action_cache_instance = action_cache_instance
|
|
37 |
+ |
|
38 |
+ def add_execution_instance(self, execution_instance):
|
|
39 |
+ self.__execution_instance = execution_instance
|
|
40 |
+ |
|
41 |
+ def get_capabilities(self):
|
|
42 |
+ server_capabilities = remote_execution_pb2.ServerCapabilities()
|
|
43 |
+ server_capabilities.cache_capabilities.CopyFrom(self._get_cache_capabilities())
|
|
44 |
+ server_capabilities.execution_capabilities.CopyFrom(self._get_capabilities_execution())
|
|
45 |
+ # TODO
|
|
46 |
+ # When API is stable, fill out SemVer values
|
|
47 |
+ # server_capabilities.deprecated_api_version =
|
|
48 |
+ # server_capabilities.low_api_version =
|
|
49 |
+ # server_capabilities.low_api_version =
|
|
50 |
+ # server_capabilities.high_api_version =
|
|
51 |
+ return server_capabilities
|
|
52 |
+ |
|
53 |
+ def _get_cache_capabilities(self):
|
|
54 |
+ capabilities = remote_execution_pb2.CacheCapabilities()
|
|
55 |
+ action_cache_update_capabilities = remote_execution_pb2.ActionCacheUpdateCapabilities()
|
|
56 |
+ |
|
57 |
+ if self.__cas_instance:
|
|
58 |
+ capabilities.digest_function.extend([self.__cas_instance.hash_type()])
|
|
59 |
+ capabilities.max_batch_total_size_bytes = self.__cas_instance.max_batch_total_size_bytes()
|
|
60 |
+ capabilities.symlink_absolute_path_strategy = self.__cas_instance.symlink_absolute_path_strategy()
|
|
61 |
+ # TODO: execution priority #102
|
|
62 |
+ # capabilities.cache_priority_capabilities =
|
|
63 |
+ |
|
64 |
+ if self.__action_cache_instance:
|
|
65 |
+ action_cache_update_capabilities.update_enabled = self.__action_cache_instance.allow_updates
|
|
66 |
+ |
|
67 |
+ capabilities.action_cache_update_capabilities.CopyFrom(action_cache_update_capabilities)
|
|
68 |
+ return capabilities
|
|
69 |
+ |
|
70 |
+ def _get_capabilities_execution(self):
|
|
71 |
+ capabilities = remote_execution_pb2.ExecutionCapabilities()
|
|
72 |
+ if self.__execution_instance:
|
|
73 |
+ capabilities.exec_enabled = True
|
|
74 |
+ capabilities.digest_function = self.__execution_instance.hash_type()
|
|
75 |
+ # TODO: execution priority #102
|
|
76 |
+ # capabilities.execution_priority =
|
|
77 |
+ |
|
78 |
+ else:
|
|
79 |
+ capabilities.exec_enabled = False
|
|
80 |
+ |
|
81 |
+ return capabilities
|
1 |
+# Copyright (C) 2018 Bloomberg LP
|
|
2 |
+#
|
|
3 |
+# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4 |
+# you may not use this file except in compliance with the License.
|
|
5 |
+# You may obtain a copy of the License at
|
|
6 |
+#
|
|
7 |
+# <http://www.apache.org/licenses/LICENSE-2.0>
|
|
8 |
+#
|
|
9 |
+# Unless required by applicable law or agreed to in writing, software
|
|
10 |
+# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11 |
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12 |
+# See the License for the specific language governing permissions and
|
|
13 |
+# limitations under the License.
|
|
14 |
+ |
|
15 |
+ |
|
16 |
+import logging
|
|
17 |
+ |
|
18 |
+import grpc
|
|
19 |
+ |
|
20 |
+from buildgrid._exceptions import InvalidArgumentError
|
|
21 |
+from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
|
|
22 |
+ |
|
23 |
+ |
|
24 |
+class CapabilitiesService(remote_execution_pb2_grpc.CapabilitiesServicer):
|
|
25 |
+ |
|
26 |
+ def __init__(self, server):
|
|
27 |
+ self.__logger = logging.getLogger(__name__)
|
|
28 |
+ self.__instances = {}
|
|
29 |
+ remote_execution_pb2_grpc.add_CapabilitiesServicer_to_server(self, server)
|
|
30 |
+ |
|
31 |
+ def add_instance(self, name, instance):
|
|
32 |
+ self.__instances[name] = instance
|
|
33 |
+ |
|
34 |
+ def add_cas_instance(self, name, instance):
|
|
35 |
+ self.__instances[name].add_cas_instance(instance)
|
|
36 |
+ |
|
37 |
+ def add_action_cache_instance(self, name, instance):
|
|
38 |
+ self.__instances[name].add_action_cache_instance(instance)
|
|
39 |
+ |
|
40 |
+ def add_execution_instance(self, name, instance):
|
|
41 |
+ self.__instances[name].add_execution_instance(instance)
|
|
42 |
+ |
|
43 |
+ def GetCapabilities(self, request, context):
|
|
44 |
+ try:
|
|
45 |
+ instance = self._get_instance(request.instance_name)
|
|
46 |
+ return instance.get_capabilities()
|
|
47 |
+ |
|
48 |
+ except InvalidArgumentError as e:
|
|
49 |
+ self.__logger.error(e)
|
|
50 |
+ context.set_details(str(e))
|
|
51 |
+ context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
|
|
52 |
+ |
|
53 |
+ return remote_execution_pb2.ServerCapabilities()
|
|
54 |
+ |
|
55 |
+ def _get_instance(self, name):
|
|
56 |
+ try:
|
|
57 |
+ return self.__instances[name]
|
|
58 |
+ |
|
59 |
+ except KeyError:
|
|
60 |
+ raise InvalidArgumentError("Instance doesn't exist on server: [{}]".format(name))
|
... | ... | @@ -25,6 +25,7 @@ from buildgrid._exceptions import InvalidArgumentError, NotFoundError, OutOfRang |
25 | 25 |
from buildgrid._protos.google.bytestream import bytestream_pb2
|
26 | 26 |
from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 as re_pb2
|
27 | 27 |
from buildgrid.settings import HASH, HASH_LENGTH
|
28 |
+from buildgrid.utils import get_hash_type
|
|
28 | 29 |
|
29 | 30 |
|
30 | 31 |
class ContentAddressableStorageInstance:
|
... | ... | @@ -37,6 +38,19 @@ class ContentAddressableStorageInstance: |
37 | 38 |
def register_instance_with_server(self, instance_name, server):
|
38 | 39 |
server.add_cas_instance(self, instance_name)
|
39 | 40 |
|
41 |
def hash_type(self):
    """Returns the hash digest type advertised by this CAS instance."""
    return get_hash_type()
|
|
43 |
+ |
|
44 |
def max_batch_total_size_bytes(self):
    """Returns the maximum total size, in bytes, accepted for a blob batch.

    TODO: link with max size — should be added from settings in MR !119.
    """
    return 2000000
|
|
48 |
+ |
|
49 |
def symlink_absolute_path_strategy(self):
    """Returns the symlink handling strategy advertised in CacheCapabilities.

    Currently this strategy is hardcoded into BuildGrid with no setting to
    reference: absolute symlinks are disallowed.
    """
    # Enum values nested in a protobuf message are exposed on the message
    # class itself; no need to instantiate a throw-away CacheCapabilities
    # message just to read the constant.
    return re_pb2.CacheCapabilities.DISALLOWED
|
|
53 |
+ |
|
40 | 54 |
def find_missing_blobs(self, blob_digests):
|
41 | 55 |
storage = self._storage
|
42 | 56 |
return re_pb2.FindMissingBlobsResponse(
|
... | ... | @@ -25,6 +25,7 @@ from buildgrid._exceptions import FailedPreconditionError, InvalidArgumentError |
25 | 25 |
from buildgrid._protos.build.bazel.remote.execution.v2.remote_execution_pb2 import Action
|
26 | 26 |
|
27 | 27 |
from ..job import Job
|
28 |
+from ...utils import get_hash_type
|
|
28 | 29 |
|
29 | 30 |
|
30 | 31 |
class ExecutionInstance:
|
... | ... | @@ -35,9 +36,16 @@ class ExecutionInstance: |
35 | 36 |
self._storage = storage
|
36 | 37 |
self._scheduler = scheduler
|
37 | 38 |
|
39 |
@property
def scheduler(self):
    """The scheduler backing this execution instance (read-only)."""
    return self._scheduler
|
|
42 |
+ |
|
38 | 43 |
def register_instance_with_server(self, instance_name, server):
|
39 | 44 |
server.add_execution_instance(self, instance_name)
|
40 | 45 |
|
46 |
def hash_type(self):
    """Returns the hash digest type used by this execution instance."""
    return get_hash_type()
|
|
48 |
+ |
|
41 | 49 |
def execute(self, action_digest, skip_cache_lookup, message_queue=None):
|
42 | 50 |
""" Sends a job for execution.
|
43 | 51 |
Queues an action and creates an Operation instance to be associated with
|
... | ... | @@ -33,30 +33,84 @@ from buildgrid._protos.google.longrunning import operations_pb2 |
33 | 33 |
|
34 | 34 |
class ExecutionService(remote_execution_pb2_grpc.ExecutionServicer):
|
35 | 35 |
|
36 |
- def __init__(self, server):
|
|
36 |
+ def __init__(self, server, monitor=False):
|
|
37 | 37 |
self.__logger = logging.getLogger(__name__)
|
38 | 38 |
|
39 |
+ self.__peers_by_instance = None
|
|
40 |
+ self.__peers = None
|
|
41 |
+ |
|
39 | 42 |
self._instances = {}
|
43 |
+ |
|
40 | 44 |
remote_execution_pb2_grpc.add_ExecutionServicer_to_server(self, server)
|
41 | 45 |
|
42 |
- def add_instance(self, name, instance):
|
|
43 |
- self._instances[name] = instance
|
|
46 |
+ self._is_instrumented = monitor
|
|
47 |
+ |
|
48 |
+ if self._is_instrumented:
|
|
49 |
+ self.__peers_by_instance = {}
|
|
50 |
+ self.__peers = {}
|
|
51 |
+ |
|
52 |
+ # --- Public API ---
|
|
53 |
+ |
|
54 |
def add_instance(self, instance_name, instance):
    """Registers a new servicer instance.

    Args:
        instance_name (str): The new instance's name.
        instance (ExecutionInstance): The new instance itself.
    """
    self._instances[instance_name] = instance

    # Start tracking client peers for that instance when monitoring:
    if self._is_instrumented:
        self.__peers_by_instance[instance_name] = set()
|
|
65 |
+ |
|
66 |
def get_scheduler(self, instance_name):
    """Retrieves a reference to the scheduler for an instance.

    Args:
        instance_name (str): The name of the instance to query.

    Returns:
        Scheduler: A reference to the scheduler for `instance_name`.

    Raises:
        InvalidArgumentError: If no instance named `instance_name` exists.
    """
    return self._get_instance(instance_name).scheduler
|
|
81 |
+ |
|
82 |
+ # --- Public API: Servicer ---
|
|
44 | 83 |
|
45 | 84 |
def Execute(self, request, context):
|
85 |
+ """Handles ExecuteRequest messages.
|
|
86 |
+ |
|
87 |
+ Args:
|
|
88 |
+ request (ExecuteRequest): The incoming RPC request.
|
|
89 |
+ context (grpc.ServicerContext): Context for the RPC call.
|
|
90 |
+ """
|
|
46 | 91 |
self.__logger.debug("Execute request from [%s]", context.peer())
|
47 | 92 |
|
93 |
+ instance_name = request.instance_name
|
|
94 |
+ message_queue = queue.Queue()
|
|
95 |
+ peer = context.peer()
|
|
96 |
+ |
|
48 | 97 |
try:
|
49 |
- message_queue = queue.Queue()
|
|
50 |
- instance = self._get_instance(request.instance_name)
|
|
98 |
+ instance = self._get_instance(instance_name)
|
|
51 | 99 |
operation = instance.execute(request.action_digest,
|
52 | 100 |
request.skip_cache_lookup,
|
53 | 101 |
message_queue)
|
54 | 102 |
|
55 |
- context.add_callback(partial(instance.unregister_message_client,
|
|
56 |
- operation.name, message_queue))
|
|
103 |
+ context.add_callback(partial(self._rpc_termination_callback,
|
|
104 |
+ peer, instance_name, operation.name, message_queue))
|
|
57 | 105 |
|
58 |
- instanced_op_name = "{}/{}".format(request.instance_name,
|
|
59 |
- operation.name)
|
|
106 |
+ if self._is_instrumented:
|
|
107 |
+ if peer not in self.__peers:
|
|
108 |
+ self.__peers_by_instance[instance_name].add(peer)
|
|
109 |
+ self.__peers[peer] = 1
|
|
110 |
+ else:
|
|
111 |
+ self.__peers[peer] += 1
|
|
112 |
+ |
|
113 |
+ instanced_op_name = "{}/{}".format(instance_name, operation.name)
|
|
60 | 114 |
|
61 | 115 |
self.__logger.info("Operation name: [%s]", instanced_op_name)
|
62 | 116 |
|
... | ... | @@ -86,23 +140,33 @@ class ExecutionService(remote_execution_pb2_grpc.ExecutionServicer): |
86 | 140 |
yield operations_pb2.Operation()
|
87 | 141 |
|
88 | 142 |
def WaitExecution(self, request, context):
|
89 |
- self.__logger.debug("WaitExecution request from [%s]", context.peer())
|
|
143 |
+ """Handles WaitExecutionRequest messages.
|
|
90 | 144 |
|
91 |
- try:
|
|
92 |
- names = request.name.split("/")
|
|
145 |
+ Args:
|
|
146 |
+ request (WaitExecutionRequest): The incoming RPC request.
|
|
147 |
+ context (grpc.ServicerContext): Context for the RPC call.
|
|
148 |
+ """
|
|
149 |
+ self.__logger.debug("WaitExecution request from [%s]", context.peer())
|
|
93 | 150 |
|
94 |
- # Operation name should be in format:
|
|
95 |
- # {instance/name}/{operation_id}
|
|
96 |
- instance_name = ''.join(names[0:-1])
|
|
151 |
+ names = request.name.split('/')
|
|
152 |
+ instance_name = '/'.join(names[:-1])
|
|
153 |
+ operation_name = names[-1]
|
|
154 |
+ message_queue = queue.Queue()
|
|
155 |
+ peer = context.peer()
|
|
97 | 156 |
|
98 |
- message_queue = queue.Queue()
|
|
99 |
- operation_name = names[-1]
|
|
157 |
+ try:
|
|
100 | 158 |
instance = self._get_instance(instance_name)
|
101 | 159 |
|
102 | 160 |
instance.register_message_client(operation_name, message_queue)
|
161 |
+ context.add_callback(partial(self._rpc_termination_callback,
|
|
162 |
+ peer, instance_name, operation_name, message_queue))
|
|
103 | 163 |
|
104 |
- context.add_callback(partial(instance.unregister_message_client,
|
|
105 |
- operation_name, message_queue))
|
|
164 |
+ if self._is_instrumented:
|
|
165 |
+ if peer not in self.__peers:
|
|
166 |
+ self.__peers_by_instance[instance_name].add(peer)
|
|
167 |
+ self.__peers[peer] = 1
|
|
168 |
+ else:
|
|
169 |
+ self.__peers[peer] += 1
|
|
106 | 170 |
|
107 | 171 |
for operation in instance.stream_operation_updates(message_queue,
|
108 | 172 |
operation_name):
|
... | ... | @@ -123,6 +187,39 @@ class ExecutionService(remote_execution_pb2_grpc.ExecutionServicer): |
123 | 187 |
context.set_code(grpc.StatusCode.CANCELLED)
|
124 | 188 |
yield operations_pb2.Operation()
|
125 | 189 |
|
190 |
+ # --- Public API: Monitoring ---
|
|
191 |
+ |
|
192 |
+ @property
|
|
193 |
+ def is_instrumented(self):
|
|
194 |
+ return self._is_instrumented
|
|
195 |
+ |
|
196 |
def query_n_clients(self):
    """Returns the number of connected clients across all instances.

    Always 0 when the service is not instrumented.
    """
    peers = self.__peers
    return len(peers) if peers is not None else 0
|
|
200 |
+ |
|
201 |
def query_n_clients_for_instance(self, instance_name):
    """Returns the number of clients connected to a given instance.

    Returns 0 when not instrumented or when the instance is unknown.
    """
    if self.__peers_by_instance is None:
        return 0
    peers = self.__peers_by_instance.get(instance_name)
    return 0 if peers is None else len(peers)
|
|
208 |
+ |
|
209 |
+ # --- Private API ---
|
|
210 |
+ |
|
211 |
+ def _rpc_termination_callback(self, peer, instance_name, job_name, message_queue):
|
|
212 |
+ instance = self._get_instance(instance_name)
|
|
213 |
+ |
|
214 |
+ instance.unregister_message_client(job_name, message_queue)
|
|
215 |
+ |
|
216 |
+ if self._is_instrumented:
|
|
217 |
+ if self.__peers[peer] > 1:
|
|
218 |
+ self.__peers[peer] -= 1
|
|
219 |
+ else:
|
|
220 |
+ self.__peers_by_instance[instance_name].remove(peer)
|
|
221 |
+ del self.__peers[peer]
|
|
222 |
+ |
|
126 | 223 |
def _get_instance(self, name):
|
127 | 224 |
try:
|
128 | 225 |
return self._instances[name]
|
... | ... | @@ -15,12 +15,16 @@ |
15 | 15 |
|
16 | 16 |
import asyncio
|
17 | 17 |
from concurrent import futures
|
18 |
+from datetime import timedelta
|
|
18 | 19 |
import logging
|
19 | 20 |
import os
|
20 | 21 |
import signal
|
22 |
+import time
|
|
21 | 23 |
|
22 | 24 |
import grpc
|
23 | 25 |
|
26 |
+from buildgrid._enums import BotStatus, MetricRecordDomain, MetricRecordType
|
|
27 |
+from buildgrid._protos.buildgrid.v2 import monitoring_pb2
|
|
24 | 28 |
from buildgrid.server.actioncache.service import ActionCacheService
|
25 | 29 |
from buildgrid.server.bots.service import BotsService
|
26 | 30 |
from buildgrid.server.cas.service import ByteStreamService, ContentAddressableStorageService
|
... | ... | @@ -28,6 +32,9 @@ from buildgrid.server.execution.service import ExecutionService |
28 | 32 |
from buildgrid.server._monitoring import MonitoringBus, MonitoringOutputType, MonitoringOutputFormat
|
29 | 33 |
from buildgrid.server.operations.service import OperationsService
|
30 | 34 |
from buildgrid.server.referencestorage.service import ReferenceStorageService
|
35 |
+from buildgrid.server.capabilities.instance import CapabilitiesInstance
|
|
36 |
+from buildgrid.server.capabilities.service import CapabilitiesService
|
|
37 |
+from buildgrid.settings import MONITORING_PERIOD
|
|
31 | 38 |
|
32 | 39 |
|
33 | 40 |
class BuildGridServer:
|
... | ... | @@ -55,6 +62,11 @@ class BuildGridServer: |
55 | 62 |
self.__main_loop = asyncio.get_event_loop()
|
56 | 63 |
self.__monitoring_bus = None
|
57 | 64 |
|
65 |
+ self.__state_monitoring_task = None
|
|
66 |
+ |
|
67 |
+ # We always want a capabilities service
|
|
68 |
+ self._capabilities_service = CapabilitiesService(self.__grpc_server)
|
|
69 |
+ |
|
58 | 70 |
self._execution_service = None
|
59 | 71 |
self._bots_service = None
|
60 | 72 |
self._operations_service = None
|
... | ... | @@ -63,6 +75,9 @@ class BuildGridServer: |
63 | 75 |
self._cas_service = None
|
64 | 76 |
self._bytestream_service = None
|
65 | 77 |
|
78 |
+ self._schedulers = {}
|
|
79 |
+ self._instances = set()
|
|
80 |
+ |
|
66 | 81 |
self._is_instrumented = monitor
|
67 | 82 |
|
68 | 83 |
if self._is_instrumented:
|
... | ... | @@ -79,6 +94,10 @@ class BuildGridServer: |
79 | 94 |
if self._is_instrumented:
|
80 | 95 |
self.__monitoring_bus.start()
|
81 | 96 |
|
97 |
+ self.__state_monitoring_task = asyncio.ensure_future(
|
|
98 |
+ self._state_monitoring_worker(period=MONITORING_PERIOD),
|
|
99 |
+ loop=self.__main_loop)
|
|
100 |
+ |
|
82 | 101 |
self.__main_loop.add_signal_handler(signal.SIGTERM, self.stop)
|
83 | 102 |
|
84 | 103 |
self.__main_loop.run_forever()
|
... | ... | @@ -86,6 +105,9 @@ class BuildGridServer: |
86 | 105 |
def stop(self):
|
87 | 106 |
"""Stops the BuildGrid server."""
|
88 | 107 |
if self._is_instrumented:
|
108 |
+ if self.__state_monitoring_task is not None:
|
|
109 |
+ self.__state_monitoring_task.cancel()
|
|
110 |
+ |
|
89 | 111 |
self.__monitoring_bus.stop()
|
90 | 112 |
|
91 | 113 |
self.__main_loop.stop()
|
... | ... | @@ -125,9 +147,14 @@ class BuildGridServer: |
125 | 147 |
instance_name (str): Instance name.
|
126 | 148 |
"""
|
127 | 149 |
if self._execution_service is None:
|
128 |
- self._execution_service = ExecutionService(self.__grpc_server)
|
|
150 |
+ self._execution_service = ExecutionService(
|
|
151 |
+ self.__grpc_server, monitor=self._is_instrumented)
|
|
129 | 152 |
|
130 | 153 |
self._execution_service.add_instance(instance_name, instance)
|
154 |
+ self._add_capabilities_instance(instance_name, execution_instance=instance)
|
|
155 |
+ |
|
156 |
+ self._schedulers[instance_name] = instance.scheduler
|
|
157 |
+ self._instances.add(instance_name)
|
|
131 | 158 |
|
132 | 159 |
def add_bots_interface(self, instance, instance_name):
|
133 | 160 |
"""Adds a :obj:`BotsInterface` to the service.
|
... | ... | @@ -139,10 +166,13 @@ class BuildGridServer: |
139 | 166 |
instance_name (str): Instance name.
|
140 | 167 |
"""
|
141 | 168 |
if self._bots_service is None:
|
142 |
- self._bots_service = BotsService(self.__grpc_server)
|
|
169 |
+ self._bots_service = BotsService(
|
|
170 |
+ self.__grpc_server, monitor=self._is_instrumented)
|
|
143 | 171 |
|
144 | 172 |
self._bots_service.add_instance(instance_name, instance)
|
145 | 173 |
|
174 |
+ self._instances.add(instance_name)
|
|
175 |
+ |
|
146 | 176 |
def add_operations_instance(self, instance, instance_name):
|
147 | 177 |
"""Adds an :obj:`OperationsInstance` to the service.
|
148 | 178 |
|
... | ... | @@ -184,9 +214,10 @@ class BuildGridServer: |
184 | 214 |
self._action_cache_service = ActionCacheService(self.__grpc_server)
|
185 | 215 |
|
186 | 216 |
self._action_cache_service.add_instance(instance_name, instance)
|
217 |
+ self._add_capabilities_instance(instance_name, action_cache_instance=instance)
|
|
187 | 218 |
|
188 | 219 |
def add_cas_instance(self, instance, instance_name):
|
189 |
- """Stores a :obj:`ContentAddressableStorageInstance` to the service.
|
|
220 |
+ """Adds a :obj:`ContentAddressableStorageInstance` to the service.
|
|
190 | 221 |
|
191 | 222 |
If no service exists, it creates one.
|
192 | 223 |
|
... | ... | @@ -198,9 +229,10 @@ class BuildGridServer: |
198 | 229 |
self._cas_service = ContentAddressableStorageService(self.__grpc_server)
|
199 | 230 |
|
200 | 231 |
self._cas_service.add_instance(instance_name, instance)
|
232 |
+ self._add_capabilities_instance(instance_name, cas_instance=instance)
|
|
201 | 233 |
|
202 | 234 |
def add_bytestream_instance(self, instance, instance_name):
|
203 |
- """Stores a :obj:`ByteStreamInstance` to the service.
|
|
235 |
+ """Adds a :obj:`ByteStreamInstance` to the service.
|
|
204 | 236 |
|
205 | 237 |
If no service exists, it creates one.
|
206 | 238 |
|
... | ... | @@ -218,3 +250,180 @@ class BuildGridServer: |
218 | 250 |
@property
|
219 | 251 |
def is_instrumented(self):
|
220 | 252 |
return self._is_instrumented
|
253 |
+ |
|
254 |
+ # --- Private API ---
|
|
255 |
+ |
|
256 |
+ def _add_capabilities_instance(self, instance_name,
|
|
257 |
+ cas_instance=None,
|
|
258 |
+ action_cache_instance=None,
|
|
259 |
+ execution_instance=None):
|
|
260 |
+ """Adds a :obj:`CapabilitiesInstance` to the service.
|
|
261 |
+ |
|
262 |
+ Args:
|
|
263 |
+ instance (:obj:`CapabilitiesInstance`): Instance to add.
|
|
264 |
+ instance_name (str): Instance name.
|
|
265 |
+ """
|
|
266 |
+ |
|
267 |
+ try:
|
|
268 |
+ if cas_instance:
|
|
269 |
+ self._capabilities_service.add_cas_instance(instance_name, cas_instance)
|
|
270 |
+ if action_cache_instance:
|
|
271 |
+ self._capabilities_service.add_action_cache_instance(instance_name, action_cache_instance)
|
|
272 |
+ if execution_instance:
|
|
273 |
+ self._capabilities_service.add_execution_instance(instance_name, execution_instance)
|
|
274 |
+ |
|
275 |
+ except KeyError:
|
|
276 |
+ capabilities_instance = CapabilitiesInstance(cas_instance,
|
|
277 |
+ action_cache_instance,
|
|
278 |
+ execution_instance)
|
|
279 |
+ self._capabilities_service.add_instance(instance_name, capabilities_instance)
|
|
280 |
+ |
|
281 |
async def _state_monitoring_worker(self, period=1.0):
    """Periodically publishes state metrics to the monitoring bus.

    Args:
        period (float): Interval, in seconds, between two publications.

    Runs until cancelled; cancellation is absorbed silently.
    """
    async def __state_monitoring_worker():
        # Emit total clients count record:
        _, record = self._query_n_clients()
        await self.__monitoring_bus.send_record(record)

        # Emit total bots count record:
        _, record = self._query_n_bots()
        await self.__monitoring_bus.send_record(record)

        queue_times = []
        # Emits records by instance:
        for instance_name in self._instances:
            # Emit instance clients count record:
            _, record = self._query_n_clients_for_instance(instance_name)
            await self.__monitoring_bus.send_record(record)

            # Emit instance bots count record:
            _, record = self._query_n_bots_for_instance(instance_name)
            await self.__monitoring_bus.send_record(record)

            # Emit instance average queue time record:
            queue_time, record = self._query_am_queue_time_for_instance(instance_name)
            await self.__monitoring_bus.send_record(record)
            if queue_time:
                queue_times.append(queue_time)

        # Emits records by bot status:
        for bot_status in [BotStatus.OK, BotStatus.UNHEALTHY]:
            # Emit status bots count record:
            _, record = self._query_n_bots_for_status(bot_status)
            await self.__monitoring_bus.send_record(record)

        # Emit overall average queue time record:
        if queue_times:
            am_queue_time = sum(queue_times, timedelta()) / len(queue_times)
        else:
            am_queue_time = timedelta()
        record = self._forge_timer_metric_record(
            MetricRecordDomain.STATE,
            'average-queue-time',
            am_queue_time)

        await self.__monitoring_bus.send_record(record)

    try:
        while True:
            start = time.time()
            await __state_monitoring_worker()

            end = time.time()
            # Clamp to zero so a monitoring pass that overruns `period`
            # does not hand asyncio.sleep() a negative delay; we simply
            # start the next pass immediately instead.
            await asyncio.sleep(max(0.0, period - (end - start)))

    except asyncio.CancelledError:
        pass
|
|
337 |
+ |
|
338 |
def _forge_counter_metric_record(self, domain, name, count, metadata=None):
    """Builds a COUNTER-type MetricRecord ready for the monitoring bus.

    Args:
        domain (MetricRecordDomain): Domain the metric belongs to.
        name (str): The metric's name.
        count (int): The counter value.
        metadata (dict): Optional extra key/value annotations.
    """
    record = monitoring_pb2.MetricRecord()

    record.creation_timestamp.GetCurrentTime()
    record.type = MetricRecordType.COUNTER.value
    record.domain = domain.value
    record.name = name
    record.count = count
    if metadata is not None:
        record.metadata.update(metadata)

    return record
|
|
350 |
+ |
|
351 |
def _forge_timer_metric_record(self, domain, name, duration, metadata=None):
    """Builds a TIMER-type MetricRecord ready for the monitoring bus.

    Args:
        domain (MetricRecordDomain): Domain the metric belongs to.
        name (str): The metric's name.
        duration (timedelta): The timer value.
        metadata (dict): Optional extra key/value annotations.
    """
    record = monitoring_pb2.MetricRecord()

    record.creation_timestamp.GetCurrentTime()
    record.type = MetricRecordType.TIMER.value
    record.domain = domain.value
    record.name = name
    record.duration.FromTimedelta(duration)
    if metadata is not None:
        record.metadata.update(metadata)

    return record
|
|
363 |
+ |
|
364 |
def _forge_gauge_metric_record(self, domain, name, value, metadata=None):
    """Builds a GAUGE-type MetricRecord ready for the monitoring bus.

    Args:
        domain (MetricRecordDomain): Domain the metric belongs to.
        name (str): The metric's name.
        value (int): The gauge value.
        metadata (dict): Optional extra key/value annotations.
    """
    record = monitoring_pb2.MetricRecord()

    record.creation_timestamp.GetCurrentTime()
    record.type = MetricRecordType.GAUGE.value
    record.domain = domain.value
    record.name = name
    record.value = value
    if metadata is not None:
        record.metadata.update(metadata)

    return record
|
|
376 |
+ |
|
377 |
+ # --- Private API: Monitoring ---
|
|
378 |
+ |
|
379 |
def _query_n_clients(self):
    """Queries the total number of connected clients.

    Returns:
        tuple: The client count and a gauge MetricRecord carrying it.
    """
    n_clients = self._execution_service.query_n_clients()

    return n_clients, self._forge_gauge_metric_record(
        MetricRecordDomain.STATE, 'clients-count', n_clients)
|
|
386 |
+ |
|
387 |
def _query_n_clients_for_instance(self, instance_name):
    """Queries the number of clients connected for a given instance.

    Returns:
        tuple: The client count and a gauge MetricRecord carrying it.
    """
    n_clients = self._execution_service.query_n_clients_for_instance(instance_name)

    return n_clients, self._forge_gauge_metric_record(
        MetricRecordDomain.STATE, 'clients-count', n_clients,
        metadata={'instance-name': instance_name or 'void'})
|
|
395 |
+ |
|
396 |
def _query_n_bots(self):
    """Queries the total number of connected bots.

    Returns:
        tuple: The bot count and a gauge MetricRecord carrying it.
    """
    n_bots = self._bots_service.query_n_bots()

    return n_bots, self._forge_gauge_metric_record(
        MetricRecordDomain.STATE, 'bots-count', n_bots)
|
|
403 |
+ |
|
404 |
def _query_n_bots_for_instance(self, instance_name):
    """Queries the number of bots connected for a given instance.

    Returns:
        tuple: The bot count and a gauge MetricRecord carrying it.
    """
    n_bots = self._bots_service.query_n_bots_for_instance(instance_name)

    return n_bots, self._forge_gauge_metric_record(
        MetricRecordDomain.STATE, 'bots-count', n_bots,
        metadata={'instance-name': instance_name or 'void'})
|
|
412 |
+ |
|
413 |
def _query_n_bots_for_status(self, bot_status):
    """Queries the number of bots connected for a given health status.

    Returns:
        tuple: The bot count and a gauge MetricRecord carrying it.
    """
    n_bots = self._bots_service.query_n_bots_for_status(bot_status)

    return n_bots, self._forge_gauge_metric_record(
        MetricRecordDomain.STATE, 'bots-count', n_bots,
        metadata={'bot-status': bot_status.name})
|
|
421 |
+ |
|
422 |
def _query_am_queue_time_for_instance(self, instance_name):
    """Queries the average job queue time for a given instance.

    Returns:
        tuple: The average queue time and a timer MetricRecord carrying it.
    """
    am_queue_time = self._schedulers[instance_name].query_am_queue_time()

    return am_queue_time, self._forge_timer_metric_record(
        MetricRecordDomain.STATE, 'average-queue-time', am_queue_time,
        metadata={'instance-name': instance_name or 'void'})
|
... | ... | @@ -13,10 +13,11 @@ |
13 | 13 |
# limitations under the License.
|
14 | 14 |
|
15 | 15 |
|
16 |
+from datetime import datetime
|
|
16 | 17 |
import logging
|
17 | 18 |
import uuid
|
18 | 19 |
|
19 |
-from google.protobuf import timestamp_pb2
|
|
20 |
+from google.protobuf import duration_pb2, timestamp_pb2
|
|
20 | 21 |
|
21 | 22 |
from buildgrid._enums import LeaseState, OperationStage
|
22 | 23 |
from buildgrid._exceptions import CancelledError
|
... | ... | @@ -40,6 +41,7 @@ class Job: |
40 | 41 |
self.__operation_metadata = remote_execution_pb2.ExecuteOperationMetadata()
|
41 | 42 |
|
42 | 43 |
self.__queued_timestamp = timestamp_pb2.Timestamp()
|
44 |
+ self.__queued_time_duration = duration_pb2.Duration()
|
|
43 | 45 |
self.__worker_start_timestamp = timestamp_pb2.Timestamp()
|
44 | 46 |
self.__worker_completed_timestamp = timestamp_pb2.Timestamp()
|
45 | 47 |
|
... | ... | @@ -56,6 +58,8 @@ class Job: |
56 | 58 |
self._operation.done = False
|
57 | 59 |
self._n_tries = 0
|
58 | 60 |
|
61 |
+ # --- Public API ---
|
|
62 |
+ |
|
59 | 63 |
@property
|
60 | 64 |
def name(self):
|
61 | 65 |
return self._name
|
... | ... | @@ -193,7 +197,7 @@ class Job: |
193 | 197 |
result.Unpack(action_result)
|
194 | 198 |
|
195 | 199 |
action_metadata = action_result.execution_metadata
|
196 |
- action_metadata.queued_timestamp.CopyFrom(self.__worker_start_timestamp)
|
|
200 |
+ action_metadata.queued_timestamp.CopyFrom(self.__queued_timestamp)
|
|
197 | 201 |
action_metadata.worker_start_timestamp.CopyFrom(self.__worker_start_timestamp)
|
198 | 202 |
action_metadata.worker_completed_timestamp.CopyFrom(self.__worker_completed_timestamp)
|
199 | 203 |
|
... | ... | @@ -227,6 +231,10 @@ class Job: |
227 | 231 |
self.__queued_timestamp.GetCurrentTime()
|
228 | 232 |
self._n_tries += 1
|
229 | 233 |
|
234 |
+ elif self.__operation_metadata.stage == OperationStage.EXECUTING.value:
|
|
235 |
+ queue_in, queue_out = self.__queued_timestamp.ToDatetime(), datetime.now()
|
|
236 |
+ self.__queued_time_duration.FromTimedelta(queue_out - queue_in)
|
|
237 |
+ |
|
230 | 238 |
elif self.__operation_metadata.stage == OperationStage.COMPLETED.value:
|
231 | 239 |
if self.__execute_response is not None:
|
232 | 240 |
self._operation.response.Pack(self.__execute_response)
|
... | ... | @@ -260,3 +268,11 @@ class Job: |
260 | 268 |
self.__execute_response.status.message = "Operation cancelled by client."
|
261 | 269 |
|
262 | 270 |
self.update_operation_stage(OperationStage.COMPLETED)
|
271 |
+ |
|
272 |
+ # --- Public API: Monitoring ---
|
|
273 |
+ |
|
274 |
def query_queue_time(self):
    """Returns how long the job sat in the queue, as a `timedelta`.

    The underlying Duration is default (zero) until it is recorded.
    """
    return self.__queued_time_duration.ToTimedelta()
|
|
276 |
+ |
|
277 |
def query_n_retries(self):
    """Returns the number of retries: total tries minus the first attempt."""
    return max(self._n_tries - 1, 0)
|
... | ... | @@ -32,6 +32,10 @@ class OperationsInstance: |
32 | 32 |
|
33 | 33 |
self._scheduler = scheduler
|
34 | 34 |
|
35 |
@property
def scheduler(self):
    """The scheduler backing this operations instance (read-only)."""
    return self._scheduler
|
|
38 |
+ |
|
35 | 39 |
def register_instance_with_server(self, instance_name, server):
|
36 | 40 |
server.add_operations_instance(self, instance_name)
|
37 | 41 |
|
... | ... | @@ -38,8 +38,18 @@ class OperationsService(operations_pb2_grpc.OperationsServicer): |
38 | 38 |
|
39 | 39 |
operations_pb2_grpc.add_OperationsServicer_to_server(self, server)
|
40 | 40 |
|
41 |
- def add_instance(self, name, instance):
|
|
42 |
- self._instances[name] = instance
|
|
41 |
+ # --- Public API ---
|
|
42 |
+ |
|
43 |
def add_instance(self, instance_name, instance):
    """Registers a new servicer instance.

    Args:
        instance_name (str): The new instance's name.
        instance (OperationsInstance): The new instance itself.
    """
    self._instances[instance_name] = instance
|
|
51 |
+ |
|
52 |
+ # --- Public API: Servicer ---
|
|
43 | 53 |
|
44 | 54 |
def GetOperation(self, request, context):
|
45 | 55 |
self.__logger.debug("GetOperation request from [%s]", context.peer())
|
... | ... | @@ -127,6 +137,8 @@ class OperationsService(operations_pb2_grpc.OperationsServicer): |
127 | 137 |
|
128 | 138 |
return Empty()
|
129 | 139 |
|
140 |
+ # --- Private API ---
|
|
141 |
+ |
|
130 | 142 |
def _parse_instance_name(self, name):
|
131 | 143 |
""" If the instance name is not blank, 'name' will have the form
|
132 | 144 |
{instance_name}/{operation_uuid}. Otherwise, it will just be
|
... | ... | @@ -19,34 +19,71 @@ Scheduler |
19 | 19 |
Schedules jobs.
|
20 | 20 |
"""
|
21 | 21 |
|
22 |
-from collections import deque
|
|
22 |
+from datetime import timedelta
|
|
23 | 23 |
import logging
|
24 |
+from queue import Queue, Empty
|
|
24 | 25 |
|
26 |
+from buildgrid._enums import LeaseState, OperationStage
|
|
25 | 27 |
from buildgrid._exceptions import NotFoundError
|
26 | 28 |
|
27 |
-from .job import OperationStage, LeaseState
|
|
28 |
- |
|
29 | 29 |
|
30 | 30 |
class Scheduler:
|
31 | 31 |
|
32 | 32 |
MAX_N_TRIES = 5
|
33 | 33 |
|
34 |
- def __init__(self, action_cache=None):
|
|
34 |
+ def __init__(self, action_cache=None, monitor=False):
|
|
35 | 35 |
self.__logger = logging.getLogger(__name__)
|
36 | 36 |
|
37 |
+ self.__operations_by_stage = None
|
|
38 |
+ self.__leases_by_state = None
|
|
39 |
+ self.__queue_time_average = None
|
|
40 |
+ self.__retries_count = 0
|
|
41 |
+ |
|
37 | 42 |
self._action_cache = action_cache
|
38 | 43 |
self.jobs = {}
|
39 |
- self.queue = deque()
|
|
44 |
+ self.queue = Queue()
|
|
45 |
+ |
|
46 |
+ self._is_instrumented = monitor
|
|
47 |
+ |
|
48 |
+ if self._is_instrumented:
|
|
49 |
+ self.__operations_by_stage = {}
|
|
50 |
+ self.__leases_by_state = {}
|
|
51 |
+ self.__queue_time_average = 0, timedelta()
|
|
52 |
+ |
|
53 |
+ self.__operations_by_stage[OperationStage.CACHE_CHECK] = set()
|
|
54 |
+ self.__operations_by_stage[OperationStage.QUEUED] = set()
|
|
55 |
+ self.__operations_by_stage[OperationStage.EXECUTING] = set()
|
|
56 |
+ self.__operations_by_stage[OperationStage.COMPLETED] = set()
|
|
57 |
+ |
|
58 |
+ self.__leases_by_state[LeaseState.PENDING] = set()
|
|
59 |
+ self.__leases_by_state[LeaseState.ACTIVE] = set()
|
|
60 |
+ self.__leases_by_state[LeaseState.COMPLETED] = set()
|
|
61 |
+ |
|
62 |
+ # --- Public API ---
|
|
40 | 63 |
|
41 | 64 |
def register_client(self, job_name, queue):
    """Subscribes `queue` to operation updates for the named job."""
    self.jobs[job_name].register_client(queue)
|
|
43 | 68 |
|
44 | 69 |
def unregister_client(self, job_name, queue):
    """Unsubscribes `queue` from the named job's updates.

    Drops the job entirely once it is done and no client observes it,
    scrubbing the monitoring bookkeeping when instrumented.
    """
    job = self.jobs[job_name]
    job.unregister_client(queue)

    if job.n_clients or not job.operation.done:
        return

    del self.jobs[job_name]

    if self._is_instrumented:
        for stage in (OperationStage.CACHE_CHECK, OperationStage.QUEUED,
                      OperationStage.EXECUTING, OperationStage.COMPLETED):
            self.__operations_by_stage[stage].discard(job_name)

        for state in (LeaseState.PENDING, LeaseState.ACTIVE,
                      LeaseState.COMPLETED):
            self.__leases_by_state[state].discard(job_name)
|
|
86 |
+ |
|
50 | 87 |
def queue_job(self, job, skip_cache_lookup=False):
|
51 | 88 |
self.jobs[job.name] = job
|
52 | 89 |
|
... | ... | @@ -56,34 +93,41 @@ class Scheduler: |
56 | 93 |
action_result = self._action_cache.get_action_result(job.action_digest)
|
57 | 94 |
except NotFoundError:
|
58 | 95 |
operation_stage = OperationStage.QUEUED
|
59 |
- self.queue.append(job)
|
|
96 |
+ self.queue.put(job)
|
|
60 | 97 |
|
61 | 98 |
else:
|
62 | 99 |
job.set_cached_result(action_result)
|
63 | 100 |
operation_stage = OperationStage.COMPLETED
|
64 | 101 |
|
102 |
+ if self._is_instrumented:
|
|
103 |
+ self.__retries_count += 1
|
|
104 |
+ |
|
65 | 105 |
else:
|
66 | 106 |
operation_stage = OperationStage.QUEUED
|
67 |
- self.queue.append(job)
|
|
107 |
+ self.queue.put(job)
|
|
68 | 108 |
|
69 |
- job.update_operation_stage(operation_stage)
|
|
109 |
+ self._update_job_operation_stage(job.name, operation_stage)
|
|
70 | 110 |
|
71 | 111 |
def retry_job(self, job_name):
    """Re-queues a job whose lease failed, or completes it after too many tries.

    Raises KeyError when `job_name` is unknown.
    """
    job = self.jobs[job_name]

    if job.n_tries >= self.MAX_N_TRIES:
        # TODO: Decide what to do with these jobs
        # TODO: Mark these jobs as done
        operation_stage = OperationStage.COMPLETED
    else:
        operation_stage = OperationStage.QUEUED
        job.update_lease_state(LeaseState.PENDING)
        self.queue.put(job)

    self._update_job_operation_stage(job_name, operation_stage)
|
|
82 | 126 |
|
83 | 127 |
def list_jobs(self):
|
84 | 128 |
return self.jobs.values()
|
85 | 129 |
|
86 |
- def request_job_leases(self, worker_capabilities):
|
|
130 |
+ def request_job_leases(self, worker_capabilities, timeout=None):
|
|
87 | 131 |
"""Generates a list of the highest priority leases to be run.
|
88 | 132 |
|
89 | 133 |
Args:
|
... | ... | @@ -91,10 +135,13 @@ class Scheduler: |
91 | 135 |
worker properties, configuration and state at the time of the
|
92 | 136 |
request.
|
93 | 137 |
"""
|
94 |
- if not self.queue:
|
|
138 |
+ if not timeout and self.queue.empty():
|
|
95 | 139 |
return []
|
96 | 140 |
|
97 |
- job = self.queue.popleft()
|
|
141 |
+ try:
|
|
142 |
+ job = self.queue.get(True, timeout) if timeout else self.queue.get(False)
|
|
143 |
+ except Empty:
|
|
144 |
+ return []
|
|
98 | 145 |
|
99 | 146 |
lease = job.lease
|
100 | 147 |
|
... | ... | @@ -118,17 +165,27 @@ class Scheduler: |
118 | 165 |
lease_result (google.protobuf.Any): the lease execution result, only
|
119 | 166 |
required if `lease_state` is `COMPLETED`.
|
120 | 167 |
"""
|
121 |
- |
|
122 | 168 |
job = self.jobs[lease.id]
|
123 | 169 |
lease_state = LeaseState(lease.state)
|
124 | 170 |
|
171 |
+ operation_stage = None
|
|
125 | 172 |
if lease_state == LeaseState.PENDING:
|
126 | 173 |
job.update_lease_state(LeaseState.PENDING)
|
127 |
- job.update_operation_stage(OperationStage.QUEUED)
|
|
174 |
+ operation_stage = OperationStage.QUEUED
|
|
175 |
+ |
|
176 |
+ if self._is_instrumented:
|
|
177 |
+ self.__leases_by_state[LeaseState.PENDING].add(lease.id)
|
|
178 |
+ self.__leases_by_state[LeaseState.ACTIVE].discard(lease.id)
|
|
179 |
+ self.__leases_by_state[LeaseState.COMPLETED].discard(lease.id)
|
|
128 | 180 |
|
129 | 181 |
elif lease_state == LeaseState.ACTIVE:
|
130 | 182 |
job.update_lease_state(LeaseState.ACTIVE)
|
131 |
- job.update_operation_stage(OperationStage.EXECUTING)
|
|
183 |
+ operation_stage = OperationStage.EXECUTING
|
|
184 |
+ |
|
185 |
+ if self._is_instrumented:
|
|
186 |
+ self.__leases_by_state[LeaseState.PENDING].discard(lease.id)
|
|
187 |
+ self.__leases_by_state[LeaseState.ACTIVE].add(lease.id)
|
|
188 |
+ self.__leases_by_state[LeaseState.COMPLETED].discard(lease.id)
|
|
132 | 189 |
|
133 | 190 |
elif lease_state == LeaseState.COMPLETED:
|
134 | 191 |
job.update_lease_state(LeaseState.COMPLETED,
|
... | ... | @@ -137,7 +194,14 @@ class Scheduler: |
137 | 194 |
if self._action_cache is not None and not job.do_not_cache:
|
138 | 195 |
self._action_cache.update_action_result(job.action_digest, job.action_result)
|
139 | 196 |
|
140 |
- job.update_operation_stage(OperationStage.COMPLETED)
|
|
197 |
+ operation_stage = OperationStage.COMPLETED
|
|
198 |
+ |
|
199 |
+ if self._is_instrumented:
|
|
200 |
+ self.__leases_by_state[LeaseState.PENDING].discard(lease.id)
|
|
201 |
+ self.__leases_by_state[LeaseState.ACTIVE].discard(lease.id)
|
|
202 |
+ self.__leases_by_state[LeaseState.COMPLETED].add(lease.id)
|
|
203 |
+ |
|
204 |
+ self._update_job_operation_stage(lease.id, operation_stage)
|
|
141 | 205 |
|
142 | 206 |
def get_job_lease(self, job_name):
|
143 | 207 |
"""Returns the lease associated to job, if any have been emitted yet."""
|
... | ... | @@ -160,3 +224,101 @@ class Scheduler: |
160 | 224 |
job_name (str): name of the job holding the operation to cancel.
|
161 | 225 |
"""
|
162 | 226 |
self.jobs[job_name].cancel_operation()
|
227 |
+ |
|
228 |
+ # --- Public API: Monitoring ---
|
|
229 |
+ |
|
230 |
+ @property
|
|
231 |
+ def is_instrumented(self):
|
|
232 |
+ return self._is_instrumented
|
|
233 |
+ |
|
234 |
+ def query_n_jobs(self):
|
|
235 |
+ return len(self.jobs)
|
|
236 |
+ |
|
237 |
+ def query_n_operations(self):
|
|
238 |
+ # For now n_operations == n_jobs:
|
|
239 |
+ return len(self.jobs)
|
|
240 |
+ |
|
241 |
+ def query_n_operations_by_stage(self, operation_stage):
|
|
242 |
+ try:
|
|
243 |
+ if self.__operations_by_stage is not None:
|
|
244 |
+ return len(self.__operations_by_stage[operation_stage])
|
|
245 |
+ except KeyError:
|
|
246 |
+ pass
|
|
247 |
+ return 0
|
|
248 |
+ |
|
249 |
+ def query_n_leases(self):
|
|
250 |
+ return len(self.jobs)
|
|
251 |
+ |
|
252 |
+ def query_n_leases_by_state(self, lease_state):
|
|
253 |
+ try:
|
|
254 |
+ if self.__leases_by_state is not None:
|
|
255 |
+ return len(self.__leases_by_state[lease_state])
|
|
256 |
+ except KeyError:
|
|
257 |
+ pass
|
|
258 |
+ return 0
|
|
259 |
+ |
|
260 |
+ def query_n_retries(self):
|
|
261 |
+ return self.__retries_count
|
|
262 |
+ |
|
263 |
+ def query_am_queue_time(self):
|
|
264 |
+ if self.__queue_time_average is not None:
|
|
265 |
+ return self.__queue_time_average[1]
|
|
266 |
+ return timedelta()
|
|
267 |
+ |
|
268 |
+ # --- Private API ---
|
|
269 |
+ |
|
270 |
+ def _update_job_operation_stage(self, job_name, operation_stage):
|
|
271 |
+ """Requests a stage transition for the job's :class:Operations.
|
|
272 |
+ |
|
273 |
+ Args:
|
|
274 |
+ job_name (str): name of the job to query.
|
|
275 |
+ operation_stage (OperationStage): the stage to transition to.
|
|
276 |
+ """
|
|
277 |
+ job = self.jobs[job_name]
|
|
278 |
+ |
|
279 |
+ if operation_stage == OperationStage.CACHE_CHECK:
|
|
280 |
+ job.update_operation_stage(OperationStage.CACHE_CHECK)
|
|
281 |
+ |
|
282 |
+ if self._is_instrumented:
|
|
283 |
+ self.__operations_by_stage[OperationStage.CACHE_CHECK].add(job_name)
|
|
284 |
+ self.__operations_by_stage[OperationStage.QUEUED].discard(job_name)
|
|
285 |
+ self.__operations_by_stage[OperationStage.EXECUTING].discard(job_name)
|
|
286 |
+ self.__operations_by_stage[OperationStage.COMPLETED].discard(job_name)
|
|
287 |
+ |
|
288 |
+ elif operation_stage == OperationStage.QUEUED:
|
|
289 |
+ job.update_operation_stage(OperationStage.QUEUED)
|
|
290 |
+ |
|
291 |
+ if self._is_instrumented:
|
|
292 |
+ self.__operations_by_stage[OperationStage.CACHE_CHECK].discard(job_name)
|
|
293 |
+ self.__operations_by_stage[OperationStage.QUEUED].add(job_name)
|
|
294 |
+ self.__operations_by_stage[OperationStage.EXECUTING].discard(job_name)
|
|
295 |
+ self.__operations_by_stage[OperationStage.COMPLETED].discard(job_name)
|
|
296 |
+ |
|
297 |
+ elif operation_stage == OperationStage.EXECUTING:
|
|
298 |
+ job.update_operation_stage(OperationStage.EXECUTING)
|
|
299 |
+ |
|
300 |
+ if self._is_instrumented:
|
|
301 |
+ self.__operations_by_stage[OperationStage.CACHE_CHECK].discard(job_name)
|
|
302 |
+ self.__operations_by_stage[OperationStage.QUEUED].discard(job_name)
|
|
303 |
+ self.__operations_by_stage[OperationStage.EXECUTING].add(job_name)
|
|
304 |
+ self.__operations_by_stage[OperationStage.COMPLETED].discard(job_name)
|
|
305 |
+ |
|
306 |
+ elif operation_stage == OperationStage.COMPLETED:
|
|
307 |
+ job.update_operation_stage(OperationStage.COMPLETED)
|
|
308 |
+ |
|
309 |
+ if self._is_instrumented:
|
|
310 |
+ self.__operations_by_stage[OperationStage.CACHE_CHECK].discard(job_name)
|
|
311 |
+ self.__operations_by_stage[OperationStage.QUEUED].discard(job_name)
|
|
312 |
+ self.__operations_by_stage[OperationStage.EXECUTING].discard(job_name)
|
|
313 |
+ self.__operations_by_stage[OperationStage.COMPLETED].add(job_name)
|
|
314 |
+ |
|
315 |
+ average_order, average_time = self.__queue_time_average
|
|
316 |
+ |
|
317 |
+ average_order += 1
|
|
318 |
+ if average_order <= 1:
|
|
319 |
+ average_time = job.query_queue_time()
|
|
320 |
+ else:
|
|
321 |
+ queue_time = job.query_queue_time()
|
|
322 |
+ average_time = average_time + ((queue_time - average_time) / average_order)
|
|
323 |
+ |
|
324 |
+ self.__queue_time_average = average_order, average_time
|
1 |
+# Copyright (C) 2018 Bloomberg LP
|
|
2 |
+#
|
|
3 |
+# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4 |
+# you may not use this file except in compliance with the License.
|
|
5 |
+# You may obtain a copy of the License at
|
|
6 |
+#
|
|
7 |
+# <http://www.apache.org/licenses/LICENSE-2.0>
|
|
8 |
+#
|
|
9 |
+# Unless required by applicable law or agreed to in writing, software
|
|
10 |
+# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11 |
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12 |
+# See the License for the specific language governing permissions and
|
|
13 |
+# limitations under the License.
|
|
14 |
+ |
|
15 |
+ |
|
1 | 16 |
import hashlib
|
2 | 17 |
|
3 | 18 |
|
4 |
-# The hash function that CAS uses
|
|
19 |
+# Hash function used for computing digests:
|
|
5 | 20 |
HASH = hashlib.sha256
|
21 |
+ |
|
22 |
+# Lenght in bytes of a hash string returned by HASH:
|
|
6 | 23 |
HASH_LENGTH = HASH().digest_size * 2
|
24 |
+ |
|
25 |
+# Period, in seconds, for the monitoring cycle:
|
|
26 |
+MONITORING_PERIOD = 5.0
|
|
27 |
+ |
|
28 |
+# time in seconds to pad timeouts
|
|
29 |
+INTERVAL_BUFFER = 5
|
... | ... | @@ -30,6 +30,14 @@ def get_hostname(): |
30 | 30 |
return socket.gethostname()
|
31 | 31 |
|
32 | 32 |
|
33 |
+def get_hash_type():
|
|
34 |
+ """Returns the hash type."""
|
|
35 |
+ hash_name = HASH().name
|
|
36 |
+ if hash_name == "sha256":
|
|
37 |
+ return remote_execution_pb2.SHA256
|
|
38 |
+ return remote_execution_pb2.UNKNOWN
|
|
39 |
+ |
|
40 |
+ |
|
33 | 41 |
def create_digest(bytes_to_digest):
|
34 | 42 |
"""Computes the :obj:`Digest` of a piece of data.
|
35 | 43 |
|
... | ... | @@ -112,13 +112,15 @@ setup( |
112 | 112 |
license="Apache License, Version 2.0",
|
113 | 113 |
description="A remote execution service",
|
114 | 114 |
packages=find_packages(),
|
115 |
+ python_requires='>= 3.5.3', # janus requirement
|
|
115 | 116 |
install_requires=[
|
116 |
- 'protobuf',
|
|
117 |
- 'grpcio',
|
|
118 |
- 'Click',
|
|
119 |
- 'PyYAML',
|
|
120 | 117 |
'boto3 < 1.8.0',
|
121 | 118 |
'botocore < 1.11.0',
|
119 |
+ 'click',
|
|
120 |
+ 'grpcio',
|
|
121 |
+ 'janus',
|
|
122 |
+ 'protobuf',
|
|
123 |
+ 'pyyaml',
|
|
122 | 124 |
],
|
123 | 125 |
entry_points={
|
124 | 126 |
'console_scripts': [
|
... | ... | @@ -21,8 +21,8 @@ import tempfile |
21 | 21 |
|
22 | 22 |
import boto3
|
23 | 23 |
import grpc
|
24 |
-import pytest
|
|
25 | 24 |
from moto import mock_s3
|
25 |
+import pytest
|
|
26 | 26 |
|
27 | 27 |
from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
|
28 | 28 |
from buildgrid.server.cas.storage.remote import RemoteStorage
|
... | ... | @@ -30,6 +30,7 @@ from ..utils.utils import run_in_subprocess |
30 | 30 |
from ..utils.bots_interface import serve_bots_interface
|
31 | 31 |
|
32 | 32 |
|
33 |
+TIMEOUT = 5
|
|
33 | 34 |
INSTANCES = ['', 'instance']
|
34 | 35 |
|
35 | 36 |
|
... | ... | @@ -48,7 +49,7 @@ class ServerInterface: |
48 | 49 |
bot_session = bots_pb2.BotSession()
|
49 | 50 |
bot_session.ParseFromString(string_bot_session)
|
50 | 51 |
|
51 |
- interface = BotInterface(grpc.insecure_channel(remote))
|
|
52 |
+ interface = BotInterface(grpc.insecure_channel(remote), TIMEOUT)
|
|
52 | 53 |
|
53 | 54 |
result = interface.create_bot_session(parent, bot_session)
|
54 | 55 |
queue.put(result.SerializeToString())
|
... | ... | @@ -67,7 +68,7 @@ class ServerInterface: |
67 | 68 |
bot_session = bots_pb2.BotSession()
|
68 | 69 |
bot_session.ParseFromString(string_bot_session)
|
69 | 70 |
|
70 |
- interface = BotInterface(grpc.insecure_channel(remote))
|
|
71 |
+ interface = BotInterface(grpc.insecure_channel(remote), TIMEOUT)
|
|
71 | 72 |
|
72 | 73 |
result = interface.update_bot_session(bot_session, update_mask)
|
73 | 74 |
queue.put(result.SerializeToString())
|
... | ... | @@ -38,7 +38,9 @@ server = mock.create_autospec(grpc.server) |
38 | 38 |
# GRPC context
|
39 | 39 |
@pytest.fixture
|
40 | 40 |
def context():
|
41 |
- yield mock.MagicMock(spec=_Context)
|
|
41 |
+ context_mock = mock.MagicMock(spec=_Context)
|
|
42 |
+ context_mock.time_remaining.return_value = None
|
|
43 |
+ yield context_mock
|
|
42 | 44 |
|
43 | 45 |
|
44 | 46 |
@pytest.fixture
|
... | ... | @@ -90,7 +92,6 @@ def test_update_bot_session(bot_session, context, instance): |
90 | 92 |
|
91 | 93 |
request = bots_pb2.UpdateBotSessionRequest(name=bot.name,
|
92 | 94 |
bot_session=bot)
|
93 |
- |
|
94 | 95 |
response = instance.UpdateBotSession(request, context)
|
95 | 96 |
|
96 | 97 |
assert isinstance(response, bots_pb2.BotSession)
|
1 |
+# Copyright (C) 2018 Bloomberg LP
|
|
2 |
+#
|
|
3 |
+# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4 |
+# you may not use this file except in compliance with the License.
|
|
5 |
+# You may obtain a copy of the License at
|
|
6 |
+#
|
|
7 |
+# <http://www.apache.org/licenses/LICENSE-2.0>
|
|
8 |
+#
|
|
9 |
+# Unless required by applicable law or agreed to in writing, software
|
|
10 |
+# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11 |
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12 |
+# See the License for the specific language governing permissions and
|
|
13 |
+# limitations under the License.
|
|
14 |
+ |
|
15 |
+# pylint: disable=redefined-outer-name
|
|
16 |
+ |
|
17 |
+ |
|
18 |
+import grpc
|
|
19 |
+import pytest
|
|
20 |
+ |
|
21 |
+from buildgrid._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
|
|
22 |
+from buildgrid.client.capabilities import CapabilitiesInterface
|
|
23 |
+from buildgrid.server.controller import ExecutionController
|
|
24 |
+from buildgrid.server.actioncache.storage import ActionCache
|
|
25 |
+from buildgrid.server.cas.instance import ContentAddressableStorageInstance
|
|
26 |
+from buildgrid.server.cas.storage.lru_memory_cache import LRUMemoryCache
|
|
27 |
+ |
|
28 |
+from ..utils.utils import run_in_subprocess
|
|
29 |
+from ..utils.capabilities import serve_capabilities_service
|
|
30 |
+ |
|
31 |
+ |
|
32 |
+INSTANCES = ['', 'instance']
|
|
33 |
+ |
|
34 |
+ |
|
35 |
+# Use subprocess to avoid creation of gRPC threads in main process
|
|
36 |
+# See https://github.com/grpc/grpc/blob/master/doc/fork_support.md
|
|
37 |
+# Multiprocessing uses pickle which protobufs don't work with
|
|
38 |
+# Workaround wrapper to send messages as strings
|
|
39 |
+class ServerInterface:
|
|
40 |
+ |
|
41 |
+ def __init__(self, remote):
|
|
42 |
+ self.__remote = remote
|
|
43 |
+ |
|
44 |
+ def get_capabilities(self, instance_name):
|
|
45 |
+ |
|
46 |
+ def __get_capabilities(queue, remote, instance_name):
|
|
47 |
+ interface = CapabilitiesInterface(grpc.insecure_channel(remote))
|
|
48 |
+ |
|
49 |
+ result = interface.get_capabilities(instance_name)
|
|
50 |
+ queue.put(result.SerializeToString())
|
|
51 |
+ |
|
52 |
+ result = run_in_subprocess(__get_capabilities,
|
|
53 |
+ self.__remote, instance_name)
|
|
54 |
+ |
|
55 |
+ capabilities = remote_execution_pb2.ServerCapabilities()
|
|
56 |
+ capabilities.ParseFromString(result)
|
|
57 |
+ return capabilities
|
|
58 |
+ |
|
59 |
+ |
|
60 |
+@pytest.mark.parametrize('instance', INSTANCES)
|
|
61 |
+def test_execution_not_available_capabilities(instance):
|
|
62 |
+ with serve_capabilities_service([instance]) as server:
|
|
63 |
+ server_interface = ServerInterface(server.remote)
|
|
64 |
+ response = server_interface.get_capabilities(instance)
|
|
65 |
+ |
|
66 |
+ assert not response.execution_capabilities.exec_enabled
|
|
67 |
+ |
|
68 |
+ |
|
69 |
+@pytest.mark.parametrize('instance', INSTANCES)
|
|
70 |
+def test_execution_available_capabilities(instance):
|
|
71 |
+ controller = ExecutionController()
|
|
72 |
+ |
|
73 |
+ with serve_capabilities_service([instance],
|
|
74 |
+ execution_instance=controller.execution_instance) as server:
|
|
75 |
+ server_interface = ServerInterface(server.remote)
|
|
76 |
+ response = server_interface.get_capabilities(instance)
|
|
77 |
+ |
|
78 |
+ assert response.execution_capabilities.exec_enabled
|
|
79 |
+ assert response.execution_capabilities.digest_function
|
|
80 |
+ |
|
81 |
+ |
|
82 |
+@pytest.mark.parametrize('instance', INSTANCES)
|
|
83 |
+def test_action_cache_allow_updates_capabilities(instance):
|
|
84 |
+ storage = LRUMemoryCache(limit=256)
|
|
85 |
+ action_cache = ActionCache(storage, max_cached_refs=256, allow_updates=True)
|
|
86 |
+ |
|
87 |
+ with serve_capabilities_service([instance],
|
|
88 |
+ action_cache_instance=action_cache) as server:
|
|
89 |
+ server_interface = ServerInterface(server.remote)
|
|
90 |
+ response = server_interface.get_capabilities(instance)
|
|
91 |
+ |
|
92 |
+ assert response.cache_capabilities.action_cache_update_capabilities.update_enabled
|
|
93 |
+ |
|
94 |
+ |
|
95 |
+@pytest.mark.parametrize('instance', INSTANCES)
|
|
96 |
+def test_action_cache_not_allow_updates_capabilities(instance):
|
|
97 |
+ storage = LRUMemoryCache(limit=256)
|
|
98 |
+ action_cache = ActionCache(storage, max_cached_refs=256, allow_updates=False)
|
|
99 |
+ |
|
100 |
+ with serve_capabilities_service([instance],
|
|
101 |
+ action_cache_instance=action_cache) as server:
|
|
102 |
+ server_interface = ServerInterface(server.remote)
|
|
103 |
+ response = server_interface.get_capabilities(instance)
|
|
104 |
+ |
|
105 |
+ assert not response.cache_capabilities.action_cache_update_capabilities.update_enabled
|
|
106 |
+ |
|
107 |
+ |
|
108 |
+@pytest.mark.parametrize('instance', INSTANCES)
|
|
109 |
+def test_cas_capabilities(instance):
|
|
110 |
+ cas = ContentAddressableStorageInstance(None)
|
|
111 |
+ |
|
112 |
+ with serve_capabilities_service([instance],
|
|
113 |
+ cas_instance=cas) as server:
|
|
114 |
+ server_interface = ServerInterface(server.remote)
|
|
115 |
+ response = server_interface.get_capabilities(instance)
|
|
116 |
+ |
|
117 |
+ assert len(response.cache_capabilities.digest_function) == 1
|
|
118 |
+ assert response.cache_capabilities.digest_function[0]
|
|
119 |
+ assert response.cache_capabilities.symlink_absolute_path_strategy
|
|
120 |
+ assert response.cache_capabilities.max_batch_total_size_bytes
|
... | ... | @@ -123,7 +123,7 @@ class BotsInterface: |
123 | 123 |
self.__bot_session_queue.put(bot_session.SerializeToString())
|
124 | 124 |
return bot_session
|
125 | 125 |
|
126 |
- def update_bot_session(self, name, bot_session):
|
|
126 |
+ def update_bot_session(self, name, bot_session, deadline=None):
|
|
127 | 127 |
for lease in bot_session.leases:
|
128 | 128 |
state = LeaseState(lease.state)
|
129 | 129 |
if state == LeaseState.COMPLETED:
|
1 |
+# Copyright (C) 2018 Bloomberg LP
|
|
2 |
+#
|
|
3 |
+# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4 |
+# you may not use this file except in compliance with the License.
|
|
5 |
+# You may obtain a copy of the License at
|
|
6 |
+#
|
|
7 |
+# <http://www.apache.org/licenses/LICENSE-2.0>
|
|
8 |
+#
|
|
9 |
+# Unless required by applicable law or agreed to in writing, software
|
|
10 |
+# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11 |
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12 |
+# See the License for the specific language governing permissions and
|
|
13 |
+# limitations under the License.
|
|
14 |
+ |
|
15 |
+ |
|
16 |
+from concurrent import futures
|
|
17 |
+from contextlib import contextmanager
|
|
18 |
+import multiprocessing
|
|
19 |
+import os
|
|
20 |
+import signal
|
|
21 |
+ |
|
22 |
+import grpc
|
|
23 |
+import pytest_cov
|
|
24 |
+ |
|
25 |
+from buildgrid.server.capabilities.service import CapabilitiesService
|
|
26 |
+from buildgrid.server.capabilities.instance import CapabilitiesInstance
|
|
27 |
+ |
|
28 |
+ |
|
29 |
+@contextmanager
|
|
30 |
+def serve_capabilities_service(instances,
|
|
31 |
+ cas_instance=None,
|
|
32 |
+ action_cache_instance=None,
|
|
33 |
+ execution_instance=None):
|
|
34 |
+ server = Server(instances,
|
|
35 |
+ cas_instance,
|
|
36 |
+ action_cache_instance,
|
|
37 |
+ execution_instance)
|
|
38 |
+ try:
|
|
39 |
+ yield server
|
|
40 |
+ finally:
|
|
41 |
+ server.quit()
|
|
42 |
+ |
|
43 |
+ |
|
44 |
+class Server:
|
|
45 |
+ |
|
46 |
+ def __init__(self, instances,
|
|
47 |
+ cas_instance=None,
|
|
48 |
+ action_cache_instance=None,
|
|
49 |
+ execution_instance=None):
|
|
50 |
+ self.instances = instances
|
|
51 |
+ |
|
52 |
+ self.__queue = multiprocessing.Queue()
|
|
53 |
+ self.__process = multiprocessing.Process(
|
|
54 |
+ target=Server.serve,
|
|
55 |
+ args=(self.__queue, self.instances, cas_instance, action_cache_instance, execution_instance))
|
|
56 |
+ self.__process.start()
|
|
57 |
+ |
|
58 |
+ self.port = self.__queue.get(timeout=1)
|
|
59 |
+ self.remote = 'localhost:{}'.format(self.port)
|
|
60 |
+ |
|
61 |
+ @staticmethod
|
|
62 |
+ def serve(queue, instances, cas_instance, action_cache_instance, execution_instance):
|
|
63 |
+ pytest_cov.embed.cleanup_on_sigterm()
|
|
64 |
+ |
|
65 |
+ # Use max_workers default from Python 3.5+
|
|
66 |
+ max_workers = (os.cpu_count() or 1) * 5
|
|
67 |
+ server = grpc.server(futures.ThreadPoolExecutor(max_workers))
|
|
68 |
+ port = server.add_insecure_port('localhost:0')
|
|
69 |
+ |
|
70 |
+ capabilities_service = CapabilitiesService(server)
|
|
71 |
+ for name in instances:
|
|
72 |
+ capabilities_instance = CapabilitiesInstance(cas_instance, action_cache_instance, execution_instance)
|
|
73 |
+ capabilities_service.add_instance(name, capabilities_instance)
|
|
74 |
+ |
|
75 |
+ server.start()
|
|
76 |
+ queue.put(port)
|
|
77 |
+ signal.pause()
|
|
78 |
+ |
|
79 |
+ def quit(self):
|
|
80 |
+ if self.__process:
|
|
81 |
+ self.__process.terminate()
|
|
82 |
+ self.__process.join()
|