[Notes] [Git][BuildStream/buildstream][jjardon/freedesktop-sdk_latest] 26 commits: buildstream/plugins/elements/script.py: Mark script as BST_VIRTUAL_DIRECTORY



Title: GitLab

Valentin David pushed to branch jjardon/freedesktop-sdk_latest at BuildStream / buildstream

Commits:

20 changed files:

Changes:

  • .coveragerc
    ... ... @@ -4,11 +4,15 @@ include =
    4 4
       */buildstream/*
    
    5 5
     
    
    6 6
     omit =
    
    7
    -  # Omit profiling helper module
    
    7
    +  # Omit some internals
    
    8 8
       */buildstream/_profile.py
    
    9
    +  */buildstream/__main__.py
    
    10
    +  */buildstream/_version.py
    
    9 11
       # Omit generated code
    
    10 12
       */buildstream/_protos/*
    
    11 13
       */.eggs/*
    
    14
    +  # Omit .tox directory
    
    15
    +  */.tox/*
    
    12 16
     
    
    13 17
     [report]
    
    14 18
     show_missing = True
    

  • .gitignore
    ... ... @@ -13,11 +13,12 @@ tests/**/*.pyc
    13 13
     integration-cache/
    
    14 14
     tmp
    
    15 15
     .coverage
    
    16
    +.coverage-reports/
    
    16 17
     .coverage.*
    
    17 18
     .cache
    
    18 19
     .pytest_cache/
    
    19 20
     *.bst/
    
    20
    -.tox
    
    21
    +.tox/
    
    21 22
     
    
    22 23
     # Pycache, in case buildstream is ran directly from within the source
    
    23 24
     # tree
    

  • .gitlab-ci.yml
    ... ... @@ -13,6 +13,7 @@ variables:
    13 13
       PYTEST_ADDOPTS: "--color=yes"
    
    14 14
       INTEGRATION_CACHE: "${CI_PROJECT_DIR}/cache/integration-cache"
    
    15 15
       TEST_COMMAND: "tox -- --color=yes --integration"
    
    16
    +  COVERAGE_PREFIX: "${CI_JOB_NAME}."
    
    16 17
     
    
    17 18
     
    
    18 19
     #####################################################
    
    ... ... @@ -24,9 +25,6 @@ variables:
    24 25
     .tests-template: &tests
    
    25 26
       stage: test
    
    26 27
     
    
    27
    -  variables:
    
    28
    -    COVERAGE_DIR: coverage-linux
    
    29
    -
    
    30 28
       before_script:
    
    31 29
       # Diagnostics
    
    32 30
       - mount
    
    ... ... @@ -40,14 +38,11 @@ variables:
    40 38
       - su buildstream -c "${TEST_COMMAND}"
    
    41 39
     
    
    42 40
       after_script:
    
    43
    -  # Collect our reports
    
    44
    -  - mkdir -p ${COVERAGE_DIR}
    
    45
    -  - cp .coverage ${COVERAGE_DIR}/coverage."${CI_JOB_NAME}"
    
    46 41
       except:
    
    47 42
       - schedules
    
    48 43
       artifacts:
    
    49 44
         paths:
    
    50
    -    - ${COVERAGE_DIR}
    
    45
    +    - .coverage-reports
    
    51 46
     
    
    52 47
     tests-debian-9:
    
    53 48
       image: buildstream/testsuite-debian:9-5da27168-32c47d1c
    
    ... ... @@ -83,7 +78,6 @@ tests-unix:
    83 78
       <<: *tests
    
    84 79
       variables:
    
    85 80
         BST_FORCE_BACKEND: "unix"
    
    86
    -    COVERAGE_DIR: coverage-unix
    
    87 81
     
    
    88 82
       script:
    
    89 83
     
    
    ... ... @@ -147,7 +141,7 @@ docs:
    147 141
       variables:
    
    148 142
         BST_EXT_URL: git+https://gitlab.com/BuildStream/bst-external.git
    
    149 143
         BST_EXT_REF: 573843768f4d297f85dc3067465b3c7519a8dcc3 # 0.7.0
    
    150
    -    FD_SDK_REF: 612f66e218445eee2b1a9d7dd27c9caba571612e # freedesktop-sdk-18.08.19-54-g612f66e2
    
    144
    +    FD_SDK_REF: freedesktop-sdk-18.08.25-0-g250939d465d6dd7768a215f1fa59c4a3412fc337
    
    151 145
       before_script:
    
    152 146
       - |
    
    153 147
         mkdir -p "${HOME}/.config"
    
    ... ... @@ -239,22 +233,22 @@ coverage:
    239 233
       stage: post
    
    240 234
       coverage: '/TOTAL +\d+ +\d+ +(\d+\.\d+)%/'
    
    241 235
       script:
    
    242
    -    - pip3 install -r requirements/requirements.txt -r requirements/dev-requirements.txt
    
    243
    -    - pip3 install --no-index .
    
    244
    -    - mkdir report
    
    245
    -    - cd report
    
    246
    -    - cp ../coverage-unix/coverage.* .
    
    247
    -    - cp ../coverage-linux/coverage.* .
    
    248
    -    - ls coverage.*
    
    249
    -    - coverage combine --rcfile=../.coveragerc -a coverage.*
    
    250
    -    - coverage report --rcfile=../.coveragerc -m
    
    236
    +    - cp -a .coverage-reports/ ./coverage-sources
    
    237
    +    - tox -e coverage
    
    238
    +    - cp -a .coverage-reports/ ./coverage-report
    
    251 239
       dependencies:
    
    252 240
       - tests-debian-9
    
    253 241
       - tests-fedora-27
    
    254 242
       - tests-fedora-28
    
    243
    +  - tests-fedora-missing-deps
    
    244
    +  - tests-ubuntu-18.04
    
    255 245
       - tests-unix
    
    256 246
       except:
    
    257 247
       - schedules
    
    248
    +  artifacts:
    
    249
    +    paths:
    
    250
    +    - coverage-sources/
    
    251
    +    - coverage-report/
    
    258 252
     
    
    259 253
     # Deploy, only for merges which land on master branch.
    
    260 254
     #
    

  • CONTRIBUTING.rst
    ... ... @@ -553,7 +553,7 @@ One problem which arises from this is that we end up having symbols
    553 553
     which are *public* according to the :ref:`rules discussed in the previous section
    
    554 554
     <contributing_public_and_private>`, but must be hidden away from the
    
    555 555
     *"Public API Surface"*. For example, BuildStream internal classes need
    
    556
    -to invoke methods on the ``Element`` and ``Source`` classes, wheras these
    
    556
    +to invoke methods on the ``Element`` and ``Source`` classes, whereas these
    
    557 557
     methods need to be hidden from the *"Public API Surface"*.
    
    558 558
     
    
    559 559
     This is where BuildStream deviates from the PEP-8 standard for public
    
    ... ... @@ -631,7 +631,7 @@ An element plugin will derive from Element by importing::
    631 631
     
    
    632 632
       from buildstream import Element
    
    633 633
     
    
    634
    -When importing utilities specifically, dont import function names
    
    634
    +When importing utilities specifically, don't import function names
    
    635 635
     from there, instead import the module itself::
    
    636 636
     
    
    637 637
       from . import utils
    
    ... ... @@ -737,7 +737,7 @@ Abstract methods
    737 737
     ~~~~~~~~~~~~~~~~
    
    738 738
     In BuildStream, an *"Abstract Method"* is a bit of a misnomer and does
    
    739 739
     not match up to how Python defines abstract methods, we need to seek out
    
    740
    -a new nomanclature to refer to these methods.
    
    740
    +a new nomenclature to refer to these methods.
    
    741 741
     
    
    742 742
     In Python, an *"Abstract Method"* is a method which **must** be
    
    743 743
     implemented by a subclass, whereas all methods in Python can be
    
    ... ... @@ -960,7 +960,7 @@ possible, and avoid any cyclic relationships in modules.
    960 960
     For instance, the ``Source`` objects are owned by ``Element``
    
    961 961
     objects in the BuildStream data model, and as such the ``Element``
    
    962 962
     will delegate some activities to the ``Source`` objects in its
    
    963
    -possesion. The ``Source`` objects should however never call functions
    
    963
    +possession. The ``Source`` objects should however never call functions
    
    964 964
     on the ``Element`` object, nor should the ``Source`` object itself
    
    965 965
     have any understanding of what an ``Element`` is.
    
    966 966
     
    
    ... ... @@ -1223,7 +1223,7 @@ For further information about using the reStructuredText with sphinx, please see
    1223 1223
     Building Docs
    
    1224 1224
     ~~~~~~~~~~~~~
    
    1225 1225
     Before you can build the docs, you will end to ensure that you have installed
    
    1226
    -the required :ref:`buid dependencies <contributing_build_deps>` as mentioned
    
    1226
    +the required :ref:`build dependencies <contributing_build_deps>` as mentioned
    
    1227 1227
     in the testing section above.
    
    1228 1228
     
    
    1229 1229
     To build the documentation, just run the following::
    
    ... ... @@ -1365,7 +1365,7 @@ Structure of an example
    1365 1365
     '''''''''''''''''''''''
    
    1366 1366
     The :ref:`tutorial <tutorial>` and the :ref:`examples <examples>` sections
    
    1367 1367
     of the documentation contain a series of sample projects, each chapter in
    
    1368
    -the tutoral, or standalone example uses a sample project.
    
    1368
    +the tutorial, or standalone example uses a sample project.
    
    1369 1369
     
    
    1370 1370
     Here is the the structure for adding new examples and tutorial chapters.
    
    1371 1371
     
    
    ... ... @@ -1471,8 +1471,8 @@ Installing build dependencies
    1471 1471
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    
    1472 1472
     Some of BuildStream's dependencies have non-python build dependencies. When
    
    1473 1473
     running tests with ``tox``, you will first need to install these dependencies.
    
    1474
    -Exact steps to install these will depend on your oprtation systemm. Commands
    
    1475
    -for installing them for some common distributions are lised below.
    
    1474
    +Exact steps to install these will depend on your operating system. Commands
    
    1475
    +for installing them for some common distributions are listed below.
    
    1476 1476
     
    
    1477 1477
     For Fedora-based systems::
    
    1478 1478
     
    
    ... ... @@ -1498,6 +1498,13 @@ option when running tox::
    1498 1498
     
    
    1499 1499
       tox -e py37
    
    1500 1500
     
    
    1501
    +If you would like to test and lint at the same time, or if you do have multiple
    
    1502
    +python versions installed and would like to test against multiple versions, then
    
    1503
    +we recommend using `detox <https://github.com/tox-dev/detox>`_, just run it with
    
    1504
    +the same arguments you would give `tox`::
    
    1505
    +
    
    1506
    +  detox -e lint,py36,py37
    
    1507
    +
    
    1501 1508
     Linting is performed separately from testing. In order to run the linting step which
    
    1502 1509
     consists of running the ``pycodestyle`` and ``pylint`` tools, run the following::
    
    1503 1510
     
    
    ... ... @@ -1527,17 +1534,27 @@ You can always abort on the first failure by running::
    1527 1534
     
    
    1528 1535
       tox -- -x
    
    1529 1536
     
    
    1537
    +Similarly, you may also be interested in the ``--last-failed`` and
    
    1538
    +``--failed-first`` options as per the
    
    1539
    +`pytest cache <https://docs.pytest.org/en/latest/cache.html>`_ documentation.
    
    1540
    +
    
    1530 1541
     If you want to run a specific test or a group of tests, you
    
    1531 1542
     can specify a prefix to match. E.g. if you want to run all of
    
    1532 1543
     the frontend tests you can do::
    
    1533 1544
     
    
    1534 1545
       tox -- tests/frontend/
    
    1535 1546
     
    
    1536
    -Specific tests can be chosen by using the :: delimeter after the test module.
    
    1547
    +Specific tests can be chosen by using the :: delimiter after the test module.
    
    1537 1548
     If you wanted to run the test_build_track test within frontend/buildtrack.py you could do::
    
    1538 1549
     
    
    1539 1550
       tox -- tests/frontend/buildtrack.py::test_build_track
    
    1540 1551
     
    
    1552
    +When running only a few tests, you may find the coverage and timing output
    
    1553
    +excessive, there are options to trim them. Note that coverage step will fail.
    
    1554
    +Here is an example::
    
    1555
    +
    
    1556
    +  tox -- --no-cov --durations=1 tests/frontend/buildtrack.py::test_build_track
    
    1557
    +
    
    1541 1558
     We also have a set of slow integration tests that are disabled by
    
    1542 1559
     default - you will notice most of them marked with SKIP in the pytest
    
    1543 1560
     output. To run them, you can use::
    
    ... ... @@ -1553,7 +1570,7 @@ can run ``tox`` with ``-r`` or ``--recreate`` option.
    1553 1570
     .. note::
    
    1554 1571
     
    
    1555 1572
        By default, we do not allow use of site packages in our ``tox``
    
    1556
    -   confguration to enable running the tests in an isolated environment.
    
    1573
    +   configuration to enable running the tests in an isolated environment.
    
    1557 1574
        If you need to enable use of site packages for whatever reason, you can
    
    1558 1575
        do so by passing the ``--sitepackages`` option to ``tox``. Also, you will
    
    1559 1576
        not need to install any of the build dependencies mentioned above if you
    
    ... ... @@ -1574,10 +1591,23 @@ can run ``tox`` with ``-r`` or ``--recreate`` option.
    1574 1591
          ./setup.py test --addopts 'tests/frontend/buildtrack.py::test_build_track'
    
    1575 1592
     
    
    1576 1593
     
    
    1594
    +Observing coverage
    
    1595
    +~~~~~~~~~~~~~~~~~~
    
    1596
    +Once you have run the tests using `tox` (or `detox`), some coverage reports will
    
    1597
    +have been left behind.
    
    1598
    +
    
    1599
    +To view the coverage report of the last test run, simply run::
    
    1600
    +
    
    1601
    +  tox -e coverage
    
    1602
    +
    
    1603
    +This will collate any reports from separate python environments that may be
    
    1604
    +under test before displaying the combined coverage.
    
    1605
    +
    
    1606
    +
    
    1577 1607
     Adding tests
    
    1578 1608
     ~~~~~~~~~~~~
    
    1579 1609
     Tests are found in the tests subdirectory, inside of which
    
    1580
    -there is a separarate directory for each *domain* of tests.
    
    1610
    +there is a separate directory for each *domain* of tests.
    
    1581 1611
     All tests are collected as::
    
    1582 1612
     
    
    1583 1613
       tests/*/*.py
    

  • buildstream/_artifactcache/cascache.py
    ... ... @@ -53,7 +53,7 @@ class CASRemoteSpec(namedtuple('CASRemoteSpec', 'url push server_cert client_key
    53 53
         #
    
    54 54
         @staticmethod
    
    55 55
         def _new_from_config_node(spec_node, basedir=None):
    
    56
    -        _yaml.node_validate(spec_node, ['url', 'push', 'server-cert', 'client-key', 'client-cert', 'instance_name'])
    
    56
    +        _yaml.node_validate(spec_node, ['url', 'push', 'server-cert', 'client-key', 'client-cert', 'instance-name'])
    
    57 57
             url = _yaml.node_get(spec_node, str, 'url')
    
    58 58
             push = _yaml.node_get(spec_node, bool, 'push', default_value=False)
    
    59 59
             if not url:
    
    ... ... @@ -61,7 +61,7 @@ class CASRemoteSpec(namedtuple('CASRemoteSpec', 'url push server_cert client_key
    61 61
                 raise LoadError(LoadErrorReason.INVALID_DATA,
    
    62 62
                                 "{}: empty artifact cache URL".format(provenance))
    
    63 63
     
    
    64
    -        instance_name = _yaml.node_get(spec_node, str, 'instance_name', default_value=None)
    
    64
    +        instance_name = _yaml.node_get(spec_node, str, 'instance-name', default_value=None)
    
    65 65
     
    
    66 66
             server_cert = _yaml.node_get(spec_node, str, 'server-cert', default_value=None)
    
    67 67
             if server_cert and basedir:
    

  • buildstream/_options/optionarch.py
    ... ... @@ -17,6 +17,8 @@
    17 17
     #  Authors:
    
    18 18
     #        Tristan Van Berkom <tristan vanberkom codethink co uk>
    
    19 19
     
    
    20
    +from .. import _yaml
    
    21
    +from .._exceptions import LoadError, LoadErrorReason, PlatformError
    
    20 22
     from .._platform import Platform
    
    21 23
     from .optionenum import OptionEnum
    
    22 24
     
    
    ... ... @@ -41,7 +43,34 @@ class OptionArch(OptionEnum):
    41 43
             super(OptionArch, self).load(node, allow_default_definition=False)
    
    42 44
     
    
    43 45
         def load_default_value(self, node):
    
    44
    -        return Platform.get_host_arch()
    
    46
    +        arch = Platform.get_host_arch()
    
    47
    +
    
    48
    +        default_value = None
    
    49
    +
    
    50
    +        for index, value in enumerate(self.values):
    
    51
    +            try:
    
    52
    +                canonical_value = Platform.canonicalize_arch(value)
    
    53
    +                if default_value is None and canonical_value == arch:
    
    54
    +                    default_value = value
    
    55
    +                    # Do not terminate the loop early to ensure we validate
    
    56
    +                    # all values in the list.
    
    57
    +            except PlatformError as e:
    
    58
    +                provenance = _yaml.node_get_provenance(node, key='values', indices=[index])
    
    59
    +                prefix = ""
    
    60
    +                if provenance:
    
    61
    +                    prefix = "{}: ".format(provenance)
    
    62
    +                raise LoadError(LoadErrorReason.INVALID_DATA,
    
    63
    +                                "{}Invalid value for {} option '{}': {}"
    
    64
    +                                .format(prefix, self.OPTION_TYPE, self.name, e))
    
    65
    +
    
    66
    +        if default_value is None:
    
    67
    +            # Host architecture is not supported by the project.
    
    68
    +            # Do not raise an error here as the user may override it.
    
    69
    +            # If the user does not override it, an error will be raised
    
    70
    +            # by resolve()/validate().
    
    71
    +            default_value = arch
    
    72
    +
    
    73
    +        return default_value
    
    45 74
     
    
    46 75
         def resolve(self):
    
    47 76
     
    

  • buildstream/_pipeline.py
    ... ... @@ -22,6 +22,7 @@
    22 22
     import os
    
    23 23
     import itertools
    
    24 24
     from operator import itemgetter
    
    25
    +from collections import OrderedDict
    
    25 26
     
    
    26 27
     from ._exceptions import PipelineError
    
    27 28
     from ._message import Message, MessageType
    
    ... ... @@ -479,7 +480,7 @@ class Pipeline():
    479 480
     #
    
    480 481
     class _Planner():
    
    481 482
         def __init__(self):
    
    482
    -        self.depth_map = {}
    
    483
    +        self.depth_map = OrderedDict()
    
    483 484
             self.visiting_elements = set()
    
    484 485
     
    
    485 486
         # Here we want to traverse the same element more than once when
    

  • buildstream/_platform/platform.py
    ... ... @@ -77,20 +77,17 @@ class Platform():
    77 77
         def get_host_os():
    
    78 78
             return os.uname()[0]
    
    79 79
     
    
    80
    -    # get_host_arch():
    
    80
    +    # canonicalize_arch():
    
    81 81
         #
    
    82
    -    # This returns the architecture of the host machine. The possible values
    
    83
    -    # map from uname -m in order to be a OS independent list.
    
    82
    +    # This returns the canonical, OS-independent architecture name
    
    83
    +    # or raises a PlatformError if the architecture is unknown.
    
    84 84
         #
    
    85
    -    # Returns:
    
    86
    -    #    (string): String representing the architecture
    
    87 85
         @staticmethod
    
    88
    -    def get_host_arch():
    
    89
    -        # get the hardware identifier from uname
    
    90
    -        uname_machine = os.uname()[4]
    
    91
    -        uname_to_arch = {
    
    86
    +    def canonicalize_arch(arch):
    
    87
    +        aliases = {
    
    88
    +            "aarch32": "aarch32",
    
    92 89
                 "aarch64": "aarch64",
    
    93
    -            "aarch64_be": "aarch64-be",
    
    90
    +            "aarch64-be": "aarch64-be",
    
    94 91
                 "amd64": "x86-64",
    
    95 92
                 "arm": "aarch32",
    
    96 93
                 "armv8l": "aarch64",
    
    ... ... @@ -99,17 +96,34 @@ class Platform():
    99 96
                 "i486": "x86-32",
    
    100 97
                 "i586": "x86-32",
    
    101 98
                 "i686": "x86-32",
    
    99
    +            "power-isa-be": "power-isa-be",
    
    100
    +            "power-isa-le": "power-isa-le",
    
    102 101
                 "ppc64": "power-isa-be",
    
    103 102
                 "ppc64le": "power-isa-le",
    
    104 103
                 "sparc": "sparc-v9",
    
    105 104
                 "sparc64": "sparc-v9",
    
    106
    -            "x86_64": "x86-64"
    
    105
    +            "sparc-v9": "sparc-v9",
    
    106
    +            "x86-32": "x86-32",
    
    107
    +            "x86-64": "x86-64"
    
    107 108
             }
    
    109
    +
    
    108 110
             try:
    
    109
    -            return uname_to_arch[uname_machine]
    
    111
    +            return aliases[arch.replace('_', '-')]
    
    110 112
             except KeyError:
    
    111
    -            raise PlatformError("uname gave unsupported machine architecture: {}"
    
    112
    -                                .format(uname_machine))
    
    113
    +            raise PlatformError("Unknown architecture: {}".format(arch))
    
    114
    +
    
    115
    +    # get_host_arch():
    
    116
    +    #
    
    117
    +    # This returns the architecture of the host machine. The possible values
    
    118
    +    # map from uname -m in order to be a OS independent list.
    
    119
    +    #
    
    120
    +    # Returns:
    
    121
    +    #    (string): String representing the architecture
    
    122
    +    @staticmethod
    
    123
    +    def get_host_arch():
    
    124
    +        # get the hardware identifier from uname
    
    125
    +        uname_machine = os.uname()[4]
    
    126
    +        return Platform.canonicalize_arch(uname_machine)
    
    113 127
     
    
    114 128
         ##################################################################
    
    115 129
         #                        Sandbox functions                       #
    

  • buildstream/_scheduler/queues/queue.py
    ... ... @@ -170,9 +170,9 @@ class Queue():
    170 170
             skip = [job for job in jobs if self.status(job.element) == QueueStatus.SKIP]
    
    171 171
             wait = [job for job in jobs if job not in skip]
    
    172 172
     
    
    173
    +        self.skipped_elements.extend([job.element for job in skip])
    
    173 174
             self._wait_queue.extend(wait)
    
    174 175
             self._done_queue.extend(skip)
    
    175
    -        self.skipped_elements.extend(skip)
    
    176 176
     
    
    177 177
         # dequeue()
    
    178 178
         #
    

  • buildstream/element.py
    ... ... @@ -2441,11 +2441,17 @@ class Element(Plugin):
    2441 2441
             # Sandbox config, unlike others, has fixed members so we should validate them
    
    2442 2442
             _yaml.node_validate(sandbox_config, ['build-uid', 'build-gid', 'build-os', 'build-arch'])
    
    2443 2443
     
    
    2444
    +        build_arch = self.node_get_member(sandbox_config, str, 'build-arch', default=None)
    
    2445
    +        if build_arch:
    
    2446
    +            build_arch = Platform.canonicalize_arch(build_arch)
    
    2447
    +        else:
    
    2448
    +            build_arch = host_arch
    
    2449
    +
    
    2444 2450
             return SandboxConfig(
    
    2445 2451
                 self.node_get_member(sandbox_config, int, 'build-uid'),
    
    2446 2452
                 self.node_get_member(sandbox_config, int, 'build-gid'),
    
    2447 2453
                 self.node_get_member(sandbox_config, str, 'build-os', default=host_os),
    
    2448
    -            self.node_get_member(sandbox_config, str, 'build-arch', default=host_arch))
    
    2454
    +            build_arch)
    
    2449 2455
     
    
    2450 2456
         # This makes a special exception for the split rules, which
    
    2451 2457
         # elements may extend but whos defaults are defined in the project.
    

  • buildstream/plugins/elements/script.py
    ... ... @@ -42,6 +42,9 @@ import buildstream
    42 42
     class ScriptElement(buildstream.ScriptElement):
    
    43 43
         # pylint: disable=attribute-defined-outside-init
    
    44 44
     
    
    45
    +    # This plugin has been modified to avoid the use of Sandbox.get_directory
    
    46
    +    BST_VIRTUAL_DIRECTORY = True
    
    47
    +
    
    45 48
         def configure(self, node):
    
    46 49
             for n in self.node_get_member(node, list, 'layout', []):
    
    47 50
                 dst = self.node_subst_member(n, 'destination')
    

  • requirements/dev-requirements.txt
    ... ... @@ -2,7 +2,7 @@ coverage==4.4
    2 2
     pylint==2.2.2
    
    3 3
     pycodestyle==2.4.0
    
    4 4
     pytest==4.0.2
    
    5
    -pytest-cov==2.6.0
    
    5
    +pytest-cov==2.6.1
    
    6 6
     pytest-datafiles==2.0
    
    7 7
     pytest-env==0.6.2
    
    8 8
     pytest-xdist==1.25.0
    

  • tests/format/option-arch-alias/element.bst
    1
    +kind: autotools
    
    2
    +variables:
    
    3
    +  result: "Nothing"
    
    4
    +  (?):
    
    5
    +  - machine_arch == "arm":
    
    6
    +      result: "Army"
    
    7
    +  - machine_arch == "x86_64":
    
    8
    +      result: "X86-64y"

  • tests/format/option-arch-alias/project.conf
    1
    +name: test
    
    2
    +
    
    3
    +options:
    
    4
    +  machine_arch:
    
    5
    +    type: arch
    
    6
    +    description: The machine architecture
    
    7
    +    values:
    
    8
    +    - arm
    
    9
    +    - x86_64

  • tests/format/option-arch-unknown/element.bst
    1
    +kind: autotools
    
    2
    +variables:
    
    3
    +  result: "Nothing"
    
    4
    +  (?):
    
    5
    +  - machine_arch == "aarch32":
    
    6
    +      result: "Army"
    
    7
    +  - machine_arch == "aarch64":
    
    8
    +      result: "Aarchy"
    
    9
    +  - machine_arch == "x86-128":
    
    10
    +      result: "X86-128y"

  • tests/format/option-arch-unknown/project.conf
    1
    +name: test
    
    2
    +
    
    3
    +options:
    
    4
    +  machine_arch:
    
    5
    +    type: arch
    
    6
    +    description: The machine architecture
    
    7
    +    values:
    
    8
    +    - aarch32
    
    9
    +    - aarch64
    
    10
    +    - x86-128

  • tests/format/optionarch.py
    ... ... @@ -75,3 +75,47 @@ def test_unsupported_arch(cli, datafiles):
    75 75
             ])
    
    76 76
     
    
    77 77
             result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
    
    78
    +
    
    79
    +
    
    80
    +@pytest.mark.datafiles(DATA_DIR)
    
    81
    +def test_alias(cli, datafiles):
    
    82
    +
    
    83
    +    with override_uname_arch("arm"):
    
    84
    +        project = os.path.join(datafiles.dirname, datafiles.basename, 'option-arch-alias')
    
    85
    +        result = cli.run(project=project, silent=True, args=[
    
    86
    +            'show',
    
    87
    +            '--deps', 'none',
    
    88
    +            '--format', '%{vars}',
    
    89
    +            'element.bst'
    
    90
    +        ])
    
    91
    +
    
    92
    +        result.assert_success()
    
    93
    +
    
    94
    +
    
    95
    +@pytest.mark.datafiles(DATA_DIR)
    
    96
    +def test_unknown_host_arch(cli, datafiles):
    
    97
    +
    
    98
    +    with override_uname_arch("x86_128"):
    
    99
    +        project = os.path.join(datafiles.dirname, datafiles.basename, 'option-arch')
    
    100
    +        result = cli.run(project=project, silent=True, args=[
    
    101
    +            'show',
    
    102
    +            '--deps', 'none',
    
    103
    +            '--format', '%{vars}',
    
    104
    +            'element.bst'
    
    105
    +        ])
    
    106
    +
    
    107
    +        result.assert_main_error(ErrorDomain.PLATFORM, None)
    
    108
    +
    
    109
    +
    
    110
    +@pytest.mark.datafiles(DATA_DIR)
    
    111
    +def test_unknown_project_arch(cli, datafiles):
    
    112
    +
    
    113
    +    project = os.path.join(datafiles.dirname, datafiles.basename, 'option-arch-unknown')
    
    114
    +    result = cli.run(project=project, silent=True, args=[
    
    115
    +        'show',
    
    116
    +        '--deps', 'none',
    
    117
    +        '--format', '%{vars}',
    
    118
    +        'element.bst'
    
    119
    +    ])
    
    120
    +
    
    121
    +    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)

  • tests/frontend/order.py
    1
    +import os
    
    2
    +
    
    3
    +import pytest
    
    4
    +from tests.testutils import cli, create_repo
    
    5
    +
    
    6
    +from buildstream import _yaml
    
    7
    +
    
    8
    +# Project directory
    
    9
    +DATA_DIR = os.path.join(
    
    10
    +    os.path.dirname(os.path.realpath(__file__)),
    
    11
    +    "project",
    
    12
    +)
    
    13
    +
    
    14
    +
    
    15
    +def create_element(repo, name, path, dependencies, ref=None):
    
    16
    +    element = {
    
    17
    +        'kind': 'import',
    
    18
    +        'sources': [
    
    19
    +            repo.source_config(ref=ref)
    
    20
    +        ],
    
    21
    +        'depends': dependencies
    
    22
    +    }
    
    23
    +    _yaml.dump(element, os.path.join(path, name))
    
    24
    +
    
    25
    +
    
    26
    +# This tests a variety of scenarios and checks that the order in
    
    27
    +# which things are processed remains stable.
    
    28
    +#
    
    29
    +# This is especially important in order to ensure that our
    
    30
    +# depth sorting and optimization of which elements should be
    
    31
    +# processed first is doing it's job right, and that we are
    
    32
    +# promoting elements to the build queue as soon as possible
    
    33
    +#
    
    34
    +# Parameters:
    
    35
    +#    targets (target elements): The targets to invoke bst with
    
    36
    +#    template (dict): The project template dictionary, for create_element()
    
    37
    +#    expected (list): A list of element names in the expected order
    
    38
    +#
    
    39
    +@pytest.mark.datafiles(os.path.join(DATA_DIR))
    
    40
    +@pytest.mark.parametrize("target,template,expected", [
    
    41
    +    # First simple test
    
    42
    +    ('3.bst', {
    
    43
    +        '0.bst': ['1.bst'],
    
    44
    +        '1.bst': [],
    
    45
    +        '2.bst': ['0.bst'],
    
    46
    +        '3.bst': ['0.bst', '1.bst', '2.bst']
    
    47
    +    }, ['1.bst', '0.bst', '2.bst', '3.bst']),
    
    48
    +
    
    49
    +    # A more complicated test with build of build dependencies
    
    50
    +    ('target.bst', {
    
    51
    +        'a.bst': [],
    
    52
    +        'base.bst': [],
    
    53
    +        'timezones.bst': [],
    
    54
    +        'middleware.bst': [{'filename': 'base.bst', 'type': 'build'}],
    
    55
    +        'app.bst': [{'filename': 'middleware.bst', 'type': 'build'}],
    
    56
    +        'target.bst': ['a.bst', 'base.bst', 'middleware.bst', 'app.bst', 'timezones.bst']
    
    57
    +    }, ['base.bst', 'middleware.bst', 'a.bst', 'app.bst', 'timezones.bst', 'target.bst']),
    
    58
    +])
    
    59
    +@pytest.mark.parametrize("operation", [('show'), ('fetch'), ('build')])
    
    60
    +def test_order(cli, datafiles, tmpdir, operation, target, template, expected):
    
    61
    +    project = os.path.join(datafiles.dirname, datafiles.basename)
    
    62
    +    dev_files_path = os.path.join(project, 'files', 'dev-files')
    
    63
    +    element_path = os.path.join(project, 'elements')
    
    64
    +
    
    65
    +    # FIXME: Remove this when the test passes reliably.
    
    66
    +    #
    
    67
    +    #        There is no reason why the order should not
    
    68
    +    #        be preserved when builders is set to 1,
    
    69
    +    #        the scheduler queue processing still seems to
    
    70
    +    #        be losing the order.
    
    71
    +    #
    
    72
    +    if operation == 'build':
    
    73
    +        pytest.skip("FIXME: This still only sometimes passes")
    
    74
    +
    
    75
    +    # Configure to only allow one fetcher at a time, make it easy to
    
    76
    +    # determine what is being planned in what order.
    
    77
    +    cli.configure({
    
    78
    +        'scheduler': {
    
    79
    +            'fetchers': 1,
    
    80
    +            'builders': 1
    
    81
    +        }
    
    82
    +    })
    
    83
    +
    
    84
    +    # Build the project from the template, make import elements
    
    85
    +    # all with the same repo
    
    86
    +    #
    
    87
    +    repo = create_repo('git', str(tmpdir))
    
    88
    +    ref = repo.create(dev_files_path)
    
    89
    +    for element, dependencies in template.items():
    
    90
    +        create_element(repo, element, element_path, dependencies, ref=ref)
    
    91
    +        repo.add_commit()
    
    92
    +
    
    93
    +    # Run test and collect results
    
    94
    +    if operation == 'show':
    
    95
    +        result = cli.run(args=['show', '--deps', 'plan', '--format', '%{name}', target], project=project, silent=True)
    
    96
    +        result.assert_success()
    
    97
    +        results = result.output.splitlines()
    
    98
    +    else:
    
    99
    +        if operation == 'fetch':
    
    100
    +            result = cli.run(args=['source', 'fetch', target], project=project, silent=True)
    
    101
    +        else:
    
    102
    +            result = cli.run(args=[operation, target], project=project, silent=True)
    
    103
    +        result.assert_success()
    
    104
    +        results = result.get_start_order(operation)
    
    105
    +
    
    106
    +    # Assert the order
    
    107
    +    print("Expected order: {}".format(expected))
    
    108
    +    print("Observed result order: {}".format(results))
    
    109
    +    assert results == expected

  • tests/testutils/runcli.py
    ... ... @@ -167,6 +167,23 @@ class Result():
    167 167
         def assert_shell_error(self, fail_message=''):
    
    168 168
             assert self.exit_code == 1, fail_message
    
    169 169
     
    
    170
    +    # get_start_order()
    
    171
    +    #
    
    172
    +    # Gets the list of elements processed in a given queue, in the
    
    173
    +    # order of their first appearances in the session.
    
    174
    +    #
    
    175
    +    # Args:
    
    176
    +    #    activity (str): The queue activity name (like 'fetch')
    
    177
    +    #
    
    178
    +    # Returns:
    
    179
    +    #    (list): A list of element names in the order which they first appeared in the result
    
    180
    +    #
    
    181
    +    def get_start_order(self, activity):
    
    182
    +        results = re.findall(r'\[\s*{}:(\S+)\s*\]\s*START\s*.*\.log'.format(activity), self.stderr)
    
    183
    +        if results is None:
    
    184
    +            return []
    
    185
    +        return list(results)
    
    186
    +
    
    170 187
         # get_tracked_elements()
    
    171 188
         #
    
    172 189
         # Produces a list of element names on which tracking occurred
    

  • tox.ini
    1
    +#
    
    2
    +# Tox global configuration
    
    3
    +#
    
    1 4
     [tox]
    
    2 5
     envlist = py35,py36,py37
    
    3 6
     skip_missing_interpreters = true
    
    4 7
     
    
    8
    +#
    
    9
    +# Defaults for all environments
    
    10
    +#
    
    11
    +# Anything specified here is inherited by the sections
    
    12
    +#
    
    5 13
     [testenv]
    
    6
    -commands = pytest {posargs}
    
    14
    +commands =
    
    15
    +    pytest --basetemp {envtmpdir} {posargs}
    
    16
    +    mkdir -p .coverage-reports
    
    17
    +    mv {envtmpdir}/.coverage {toxinidir}/.coverage-reports/.coverage.{env:COVERAGE_PREFIX:}{envname}
    
    7 18
     deps =
    
    8 19
         -rrequirements/requirements.txt
    
    9 20
         -rrequirements/dev-requirements.txt
    
    ... ... @@ -13,6 +24,32 @@ passenv =
    13 24
         GI_TYPELIB_PATH
    
    14 25
         INTEGRATION_CACHE
    
    15 26
     
    
    27
    +#
    
    28
    +# These keys are not inherited by any other sections
    
    29
    +#
    
    30
    +setenv =
    
    31
    +    py{35,36,37}: COVERAGE_FILE = {envtmpdir}/.coverage
    
    32
    +whitelist_externals =
    
    33
    +    py{35,36,37}:
    
    34
    +        mv
    
    35
    +        mkdir
    
    36
    +
    
    37
    +#
    
    38
    +# Coverage reporting
    
    39
    +#
    
    40
    +[testenv:coverage]
    
    41
    +commands =
    
    42
    +    - coverage combine --rcfile={toxinidir}/.coveragerc {toxinidir}/.coverage-reports/
    
    43
    +    coverage report --rcfile={toxinidir}/.coveragerc -m
    
    44
    +deps =
    
    45
    +    -rrequirements/requirements.txt
    
    46
    +    -rrequirements/dev-requirements.txt
    
    47
    +setenv =
    
    48
    +    COVERAGE_FILE = {toxinidir}/.coverage-reports/.coverage
    
    49
    +
    
    50
    +#
    
    51
    +# Running linters
    
    52
    +#
    
    16 53
     [testenv:lint]
    
    17 54
     commands =
    
    18 55
         pycodestyle
    
    ... ... @@ -22,6 +59,9 @@ deps =
    22 59
         -rrequirements/dev-requirements.txt
    
    23 60
         -rrequirements/plugin-requirements.txt
    
    24 61
     
    
    62
    +#
    
    63
    +# Building documentation
    
    64
    +#
    
    25 65
     [testenv:docs]
    
    26 66
     commands =
    
    27 67
         make -C doc
    



  • [Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]