James Ennis pushed to branch jennis/doc_fixes_in_context at BuildStream / buildstream
Commits:
- 0b83d024 by Tristan Van Berkom at 2019-01-10T20:02:50Z
- 630e26f1 by Tristan Van Berkom at 2019-01-10T20:03:52Z
- d1d7de57 by Tristan Van Berkom at 2019-01-11T04:13:16Z
- 276b8d48 by Tristan Van Berkom at 2019-01-11T04:42:24Z
- fc3dcec8 by Angelos Evripiotis at 2019-01-11T16:45:00Z
- f86b7ff3 by Angelos Evripiotis at 2019-01-11T16:45:00Z
- d983f231 by Angelos Evripiotis at 2019-01-11T17:19:06Z
- 829a2b93 by Tristan Van Berkom at 2019-01-12T21:32:13Z
- 993e30ae by Tristan Van Berkom at 2019-01-12T22:02:55Z
- 32732e01 by Javier Jardón at 2019-01-14T09:04:01Z
- 4f5f1184 by Valentin David at 2019-01-14T09:40:56Z
- bb80a2b8 by Chandan Singh at 2019-01-14T14:30:15Z
- 10b3ee62 by Chandan Singh at 2019-01-14T14:57:08Z
- 542cdaf0 by Jürg Billeter at 2019-01-14T18:46:57Z
- ff666e76 by James Ennis at 2019-01-14T18:46:57Z
- 273b0f55 by Tristan Van Berkom at 2019-01-14T20:24:57Z
- 2e3c2ea2 by Javier Jardón at 2019-01-14T22:26:04Z
- d60d2e31 by Javier Jardón at 2019-01-15T00:39:22Z
- 605836c1 by Chandan Singh at 2019-01-15T00:57:05Z
- 006370af by Chandan Singh at 2019-01-15T01:21:33Z
- be5af6f5 by James Ennis at 2019-01-15T17:14:40Z
- 0cedfec7 by James Ennis at 2019-01-15T17:14:40Z
10 changed files:
- .gitlab-ci.yml
- CONTRIBUTING.rst
- buildstream/_context.py
- buildstream/_frontend/cli.py
- buildstream/_pipeline.py
- buildstream/_scheduler/queues/queue.py
- requirements/dev-requirements.txt
- tests/completions/completions.py
- tests/frontend/order.py (new file)
- tests/testutils/runcli.py
Changes:
.gitlab-ci.yml:

@@ -61,7 +61,7 @@ tests-ubuntu-18.04:
   <<: *tests

 overnight-fedora-28-aarch64:
-  image: buildstream/testsuite-fedora:aarch64-28-06bab030-32a101f6
+  image: buildstream/testsuite-fedora:aarch64-28-5da27168-32c47d1c
   tags:
   - aarch64
   <<: *tests
@@ -70,6 +70,12 @@ overnight-fedora-28-aarch64:
   except: []
   only:
   - schedules
+  before_script:
+    # grpcio needs to be compiled from source on aarch64 so we additionally
+    # need a C++ compiler here.
+    # FIXME: Ideally this would be provided by the base image. This will be
+    # unblocked by https://gitlab.com/BuildStream/buildstream-docker-images/issues/34
+    - dnf install -y gcc-c++

 tests-unix:
   # Use fedora here, to a) run a test on fedora and b) ensure that we
@@ -90,7 +96,6 @@ tests-unix:
   # Since the unix platform is required to run as root, no user change required
   - ${TEST_COMMAND}

-
 tests-fedora-missing-deps:
   # Ensure that tests behave nicely while missing bwrap and ostree
   image: buildstream/testsuite-fedora:28-5da27168-32c47d1c
@@ -108,6 +113,22 @@ tests-fedora-missing-deps:

   - ${TEST_COMMAND}

+tests-fedora-update-deps:
+  # Check if the tests pass after updating requirements to their latest
+  # allowed version.
+  allow_failure: true
+  image: buildstream/testsuite-fedora:28-5da27168-32c47d1c
+  <<: *tests
+
+  script:
+    - useradd -Um buildstream
+    - chown -R buildstream:buildstream .
+
+    - make --always-make --directory requirements
+    - cat requirements/*.txt
+
+    - su buildstream -c "${TEST_COMMAND}"
+
 # Lint separately from testing
 lint:
   stage: test
@@ -140,8 +161,8 @@ docs:
   stage: test
   variables:
     BST_EXT_URL: git+https://gitlab.com/BuildStream/bst-external.git
-    BST_EXT_REF: 573843768f4d297f85dc3067465b3c7519a8dcc3 # 0.7.0
-    FD_SDK_REF: 612f66e218445eee2b1a9d7dd27c9caba571612e # freedesktop-sdk-18.08.19-54-g612f66e2
+    BST_EXT_REF: 0.9.0-0-g63a19e8068bd777bd9cd59b1a9442f9749ea5a85
+    FD_SDK_REF: freedesktop-sdk-18.08.25-0-g250939d465d6dd7768a215f1fa59c4a3412fc337
   before_script:
   - |
     mkdir -p "${HOME}/.config"
CONTRIBUTING.rst:

@@ -1534,6 +1534,10 @@ You can always abort on the first failure by running::

   tox -- -x

+Similarly, you may also be interested in the ``--last-failed`` and
+``--failed-first`` options as per the
+`pytest cache <https://docs.pytest.org/en/latest/cache.html>`_ documentation.
+
 If you want to run a specific test or a group of tests, you
 can specify a prefix to match. E.g. if you want to run all of
 the frontend tests you can do::
@@ -1545,6 +1549,12 @@ If you wanted to run the test_build_track test within frontend/buildtrack.py you

   tox -- tests/frontend/buildtrack.py::test_build_track

+When running only a few tests, you may find the coverage and timing output
+excessive; there are options to trim them. Note that the coverage step will fail.
+Here is an example::
+
+  tox -- --no-cov --durations=1 tests/frontend/buildtrack.py::test_build_track
+
 We also have a set of slow integration tests that are disabled by
 default - you will notice most of them marked with SKIP in the pytest
 output. To run them, you can use::
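These pytest options compose with the test selectors above; for instance, a plausible invocation (illustrative, not taken from these commits) that reruns only the previously failing frontend tests without coverage output would be::

    tox -- --last-failed --no-cov tests/frontend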
buildstream/_context.py:

@@ -317,11 +317,18 @@ class Context():
     # invoked with as opposed to a junctioned subproject.
     #
     # Returns:
-    #    (list): The list of projects
+    #    (Project): The Project object
     #
     def get_toplevel_project(self):
         return self._projects[0]

+    # get_workspaces():
+    #
+    # Return a Workspaces object containing a list of workspaces.
+    #
+    # Returns:
+    #    (Workspaces): The Workspaces object
+    #
     def get_workspaces(self):
         return self._workspaces

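Taken together with the completion changes below, these accessors are consumed roughly as follows (a minimal sketch using only calls that appear in these diffs; error handling omitted):

    from buildstream._context import Context

    ctx = Context()
    ctx.load(None)  # None falls back to the default user configuration

    project = ctx.get_toplevel_project()   # the invoked project, not a junctioned subproject
    workspaces = ctx.get_workspaces()      # a Workspaces object wrapping the open workspaces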
buildstream/_frontend/cli.py:

@@ -2,6 +2,7 @@ import os
 import sys
 from contextlib import ExitStack
 from fnmatch import fnmatch
+from functools import partial
 from tempfile import TemporaryDirectory

 import click
@@ -111,14 +112,25 @@ def complete_target(args, incomplete):
     return complete_list


-def complete_artifact(args, incomplete):
+def complete_artifact(orig_args, args, incomplete):
     from .._context import Context
     ctx = Context()

     config = None
-    for i, arg in enumerate(args):
-        if arg in ('-c', '--config'):
-            config = args[i + 1]
+    if orig_args:
+        for i, arg in enumerate(orig_args):
+            if arg in ('-c', '--config'):
+                try:
+                    config = orig_args[i + 1]
+                except IndexError:
+                    pass
+    if args:
+        for i, arg in enumerate(args):
+            if arg in ('-c', '--config'):
+                try:
+                    config = args[i + 1]
+                except IndexError:
+                    pass
     ctx.load(config)

     # element targets are valid artifact names
@@ -128,8 +140,9 @@ def complete_artifact(args, incomplete):
     return complete_list


-def override_completions(cmd, cmd_param, args, incomplete):
+def override_completions(orig_args, cmd, cmd_param, args, incomplete):
     """
+    :param orig_args: original, non-completion args
     :param cmd_param: command definition
     :param args: full list of args typed before the incomplete arg
     :param incomplete: the incomplete text to autocomplete
@@ -150,7 +163,7 @@ def override_completions(cmd, cmd_param, args, incomplete):
                 cmd_param.opts == ['--track-except']):
             return complete_target(args, incomplete)
         if cmd_param.name == 'artifacts':
-            return complete_artifact(args, incomplete)
+            return complete_artifact(orig_args, args, incomplete)

     raise CompleteUnhandled()

@@ -161,7 +174,7 @@ def override_main(self, args=None, prog_name=None, complete_var=None,
     # Hook for the Bash completion. This only activates if the Bash
     # completion is actually enabled, otherwise this is quite a fast
     # noop.
-    if main_bashcomplete(self, prog_name, override_completions):
+    if main_bashcomplete(self, prog_name, partial(override_completions, args)):

         # If we're running tests we cant just go calling exit()
         # from the main process.
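The functools.partial change is the heart of this hunk: the completion machinery only passes four arguments (cmd, cmd_param, args, incomplete) to the callback, so the original argv is pre-bound as an extra leading parameter. A standalone sketch of the same pattern (names here are illustrative, not the real click API):

    from functools import partial

    def override_completions(orig_args, cmd, cmd_param, args, incomplete):
        # By the time the completion hook runs, orig_args is already bound;
        # only the remaining four parameters come from the caller.
        return [word for word in orig_args if word.startswith(incomplete)]

    # Bind the first parameter up front, yielding the four-argument
    # signature the completion hook expects.
    hook = partial(override_completions, ['bst', '--config', 'conf.yml'])
    assert hook(None, None, [], '--c') == ['--config']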
buildstream/_pipeline.py:

@@ -22,6 +22,7 @@
 import os
 import itertools
 from operator import itemgetter
+from collections import OrderedDict

 from ._exceptions import PipelineError
 from ._message import Message, MessageType
@@ -479,7 +480,7 @@ class Pipeline():
 #
 class _Planner():
     def __init__(self):
-        self.depth_map = {}
+        self.depth_map = OrderedDict()
         self.visiting_elements = set()

     # Here we want to traverse the same element more than once when
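The OrderedDict swap matters because the planner later depth-sorts depth_map and relies on insertion (visit) order to break ties; on the Python versions supported at the time (pre-3.7), plain dict iteration order was unspecified. A minimal illustration of the invariant, with element names borrowed from the new order test below:

    from collections import OrderedDict
    from operator import itemgetter

    # Insertion records visit order; sorted() is stable, so elements that
    # share a depth keep their visit order in the final plan.
    depth_map = OrderedDict()
    depth_map['base.bst'] = 1
    depth_map['a.bst'] = 0
    depth_map['timezones.bst'] = 0

    plan = [name for name, _ in
            sorted(depth_map.items(), key=itemgetter(1), reverse=True)]
    assert plan == ['base.bst', 'a.bst', 'timezones.bst']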
buildstream/_scheduler/queues/queue.py:

@@ -170,9 +170,9 @@ class Queue():
         skip = [job for job in jobs if self.status(job.element) == QueueStatus.SKIP]
         wait = [job for job in jobs if job not in skip]

+        self.skipped_elements.extend([job.element for job in skip])
         self._wait_queue.extend(wait)
         self._done_queue.extend(skip)
-        self.skipped_elements.extend(skip)

     # dequeue()
     #
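Besides the reordering, this hunk fixes a type bug: skipped_elements previously accumulated the Job objects themselves rather than their elements. A self-contained model of the difference (Job and Element here are stand-ins, not the real classes):

    class Element:
        def __init__(self, name):
            self.name = name

    class Job:
        def __init__(self, element):
            self.element = element

    skip = [Job(Element('base.bst')), Job(Element('app.bst'))]

    old_skipped = list(skip)                     # old behaviour: a list of Jobs
    new_skipped = [job.element for job in skip]  # new behaviour: a list of Elements

    assert not isinstance(old_skipped[0], Element)
    assert all(isinstance(e, Element) for e in new_skipped)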
requirements/dev-requirements.txt:

@@ -2,7 +2,7 @@ coverage==4.4
 pylint==2.2.2
 pycodestyle==2.4.0
 pytest==4.0.2
-pytest-cov==2.6.0
+pytest-cov==2.6.1
 pytest-datafiles==2.0
 pytest-env==0.6.2
 pytest-xdist==1.25.0
tests/completions/completions.py:

@@ -281,3 +281,44 @@ def test_argument_element_invalid(datafiles, cli, project, cmd, word_idx, expected):
 ])
 def test_help_commands(cli, cmd, word_idx, expected):
     assert_completion(cli, cmd, word_idx, expected)
+
+
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'project'))
+def test_argument_artifact(cli, tmpdir, datafiles):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+
+    # Build an import element with no dependencies (as there will only be ONE cache key)
+    result = cli.run(project=project, args=['build', 'import-bin.bst'])  # Has no dependencies
+    result.assert_success()
+
+    # Get the key and the artifact ref ($project/$element_name/$key)
+    key = cli.get_element_key(project, 'import-bin.bst')
+    artifact = os.path.join('test', 'import-bin', key)
+
+    # Test autocompletion of the artifact
+    cmds = [
+        'bst artifact log ',
+        'bst artifact log t',
+        'bst artifact log test/'
+    ]
+
+    for i, cmd in enumerate(cmds):
+        word_idx = 3
+        result = cli.run(project=project, cwd=project, env={
+            '_BST_COMPLETION': 'complete',
+            'COMP_WORDS': cmd,
+            'COMP_CWORD': str(word_idx)
+        })
+        words = []
+        if result.output:
+            words = result.output.splitlines()  # This leaves an extra space on each e.g. ['foo.bst ']
+            words = [word.strip() for word in words]
+
+        if i == 0:
+            expected = PROJECT_ELEMENTS + [artifact]  # We should now be able to see the artifact
+        elif i == 1:
+            expected = ['target.bst', artifact]
+        elif i == 2:
+            expected = [artifact]
+
+        assert expected == words
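The test drives the bash-completion protocol directly: COMP_WORDS carries the partial command line and COMP_CWORD the index of the word under the cursor, so word_idx 3 points just past "bst artifact log". A simplified model of how those variables select the incomplete word (a sketch, not click's actual implementation):

    def current_word(comp_words, comp_cword):
        # If the cursor sits after a trailing space, the word being
        # completed is empty; otherwise it is the word at that index.
        words = comp_words.split()
        if comp_cword >= len(words):
            return ''
        return words[comp_cword]

    assert current_word('bst artifact log ', 3) == ''
    assert current_word('bst artifact log t', 3) == 't'
    assert current_word('bst artifact log test/', 3) == 'test/'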
tests/frontend/order.py (new file):

@@ -0,0 +1,109 @@
+import os
+
+import pytest
+from tests.testutils import cli, create_repo
+
+from buildstream import _yaml
+
+# Project directory
+DATA_DIR = os.path.join(
+    os.path.dirname(os.path.realpath(__file__)),
+    "project",
+)
+
+
+def create_element(repo, name, path, dependencies, ref=None):
+    element = {
+        'kind': 'import',
+        'sources': [
+            repo.source_config(ref=ref)
+        ],
+        'depends': dependencies
+    }
+    _yaml.dump(element, os.path.join(path, name))
+
+
+# This tests a variety of scenarios and checks that the order in
+# which things are processed remains stable.
+#
+# This is especially important in order to ensure that our
+# depth sorting and optimization of which elements should be
+# processed first is doing its job right, and that we are
+# promoting elements to the build queue as soon as possible
+#
+# Parameters:
+#    targets (target elements): The targets to invoke bst with
+#    template (dict): The project template dictionary, for create_element()
+#    expected (list): A list of element names in the expected order
+#
+@pytest.mark.datafiles(os.path.join(DATA_DIR))
+@pytest.mark.parametrize("target,template,expected", [
+    # First simple test
+    ('3.bst', {
+        '0.bst': ['1.bst'],
+        '1.bst': [],
+        '2.bst': ['0.bst'],
+        '3.bst': ['0.bst', '1.bst', '2.bst']
+    }, ['1.bst', '0.bst', '2.bst', '3.bst']),
+
+    # A more complicated test with build of build dependencies
+    ('target.bst', {
+        'a.bst': [],
+        'base.bst': [],
+        'timezones.bst': [],
+        'middleware.bst': [{'filename': 'base.bst', 'type': 'build'}],
+        'app.bst': [{'filename': 'middleware.bst', 'type': 'build'}],
+        'target.bst': ['a.bst', 'base.bst', 'middleware.bst', 'app.bst', 'timezones.bst']
+    }, ['base.bst', 'middleware.bst', 'a.bst', 'app.bst', 'timezones.bst', 'target.bst']),
+])
+@pytest.mark.parametrize("operation", [('show'), ('fetch'), ('build')])
+def test_order(cli, datafiles, tmpdir, operation, target, template, expected):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    dev_files_path = os.path.join(project, 'files', 'dev-files')
+    element_path = os.path.join(project, 'elements')
+
+    # FIXME: Remove this when the test passes reliably.
+    #
+    # There is no reason why the order should not
+    # be preserved when builders is set to 1, but
+    # the scheduler queue processing still seems to
+    # be losing the order.
+    #
+    if operation == 'build':
+        pytest.skip("FIXME: This still only sometimes passes")
+
+    # Configure to only allow one fetcher at a time, to make it easy to
+    # determine what is being planned in what order.
+    cli.configure({
+        'scheduler': {
+            'fetchers': 1,
+            'builders': 1
+        }
+    })
+
+    # Build the project from the template, make import elements
+    # all with the same repo
+    #
+    repo = create_repo('git', str(tmpdir))
+    ref = repo.create(dev_files_path)
+    for element, dependencies in template.items():
+        create_element(repo, element, element_path, dependencies, ref=ref)
+        repo.add_commit()
+
+    # Run test and collect results
+    if operation == 'show':
+        result = cli.run(args=['show', '--deps', 'plan', '--format', '%{name}', target], project=project, silent=True)
+        result.assert_success()
+        results = result.output.splitlines()
+    else:
+        if operation == 'fetch':
+            result = cli.run(args=['source', 'fetch', target], project=project, silent=True)
+        else:
+            result = cli.run(args=[operation, target], project=project, silent=True)
+        result.assert_success()
+        results = result.get_start_order(operation)
+
+    # Assert the order
+    print("Expected order: {}".format(expected))
+    print("Observed result order: {}".format(results))
+    assert results == expected
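To see why the first case expects ['1.bst', '0.bst', '2.bst', '3.bst'], here is a deliberately simplified model of the depth-map planner touched in _pipeline.py above (it ignores the build/runtime dependency distinction and artifact caching, so it sketches the idea rather than the real algorithm):

    from collections import OrderedDict
    from operator import itemgetter

    def plan(template, root):
        depth_map = OrderedDict()          # insertion order records first visit

        def visit(name, depth):
            for dep in template[name]:
                visit(dep, depth + 1)      # dependencies sit deeper than dependants
            if depth_map.get(name, -1) < depth:
                depth_map[name] = depth    # keep the deepest depth seen
        visit(root, 0)

        # Deepest first; the stable sort preserves visit order between equal depths
        return [n for n, _ in
                sorted(depth_map.items(), key=itemgetter(1), reverse=True)]

    template = {
        '0.bst': ['1.bst'],
        '1.bst': [],
        '2.bst': ['0.bst'],
        '3.bst': ['0.bst', '1.bst', '2.bst'],
    }
    assert plan(template, '3.bst') == ['1.bst', '0.bst', '2.bst', '3.bst']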
tests/testutils/runcli.py:

@@ -167,6 +167,23 @@ class Result():
     def assert_shell_error(self, fail_message=''):
         assert self.exit_code == 1, fail_message

+    # get_start_order()
+    #
+    # Gets the list of elements processed in a given queue, in the
+    # order of their first appearances in the session.
+    #
+    # Args:
+    #    activity (str): The queue activity name (like 'fetch')
+    #
+    # Returns:
+    #    (list): A list of element names in the order in which they first appeared in the result
+    #
+    def get_start_order(self, activity):
+        results = re.findall(r'\[\s*{}:(\S+)\s*\]\s*START\s*.*\.log'.format(activity), self.stderr)
+        if results is None:
+            return []
+        return list(results)
+
     # get_tracked_elements()
     #
     # Produces a list of element names on which tracking occurred