[tracker/sam/tracker-3.0-functional-tests: 3/3] WIP: Update functional tests for Tracker 3.0 changes

commit b6f083b673d15329f06160d9b6f17a61378cc984
Author: Sam Thursfield <sam@afuera.me.uk>
Date:   Tue Jan 21 00:29:01 2020 +0100

    WIP: Update functional tests for Tracker 3.0 changes
    
    This commit updates the functional tests in line with the
    Tracker 3.0 architectural changes.
    
    These tests used to spawn a private tracker-store daemon and communicate
    over D-Bus. Most of the tests now create a local database in-process and
    run against that, using the PyGObject bindings to drive
    libtracker-sparql. It's good to test our Python bindings in this way.
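
    As an illustration, the new-style test setup looks roughly like this
    (a minimal sketch based only on API that appears in the diff below;
    the database path is hypothetical):

        import gi
        gi.require_version('Tracker', '3.0')
        from gi.repository import Gio, Tracker

        # Open (or create) a private database in the given directory. The
        # third argument is an optional ontology location; None here falls
        # back to the defaults, as in storetest.py below.
        conn = Tracker.SparqlConnection.new(
            Tracker.SparqlConnectionFlags.NONE,
            Gio.File.new_for_path('/tmp/tracker-test'),
            None, None)

        cursor = conn.query('SELECT ?u { ?u a rdfs:Resource }', None)
        while cursor.next():
            print(cursor.get_string(0)[0])
        conn.close()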
    
    The 15-statistics test is removed as the API that it tests is gone.
    
    It's debatable whether the name "functional tests" really applies here
    any more. Perhaps the bigger distinction from the "unit tests", which
    also run against libtracker-sparql, is that these are written in Python
    using PyGObject rather than in C. In tracker-miners.git we will still be
    testing multiple processes, so the name will still make sense there.
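
    Similarly, the GraphUpdated D-Bus signal is replaced by TrackerNotifier
    in helpers.py. The pattern is roughly the following (a hedged sketch;
    the callback body is illustrative, and this code path is still marked
    FIXME in the patch):

        notifier = conn.create_notifier(Tracker.NotifierFlags.NONE)

        def on_events(notifier, service, graph, events):
            # Each element of 'events' describes one resource change.
            print("Got %i events from %s, %s" % (len(events), service, graph))

        notifier.connect('events', on_events)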

 src/libtracker-sparql-backend/meson.build     |   2 +
 tests/functional-tests/15-statistics.py       | 126 -------------
 tests/functional-tests/17-ontology-changes.py |  47 +++--
 tests/functional-tests/meson.build            |   3 +-
 tests/functional-tests/storetest.py           |  24 +--
 utils/trackertestutils/helpers.py             | 257 ++++----------------------
 6 files changed, 73 insertions(+), 386 deletions(-)
---
diff --git a/src/libtracker-sparql-backend/meson.build b/src/libtracker-sparql-backend/meson.build
index 75d3f6080..6659e84f9 100644
--- a/src/libtracker-sparql-backend/meson.build
+++ b/src/libtracker-sparql-backend/meson.build
@@ -60,3 +60,5 @@ tracker_sparql_gir = gnome.generate_gir(libtracker_sparql,
         'libtracker-sparql/tracker-sparql.h',
         '-DTRACKER_COMPILATION',
     ])
+
+tracker_sparql_uninstalled_dir = meson.current_build_dir()
diff --git a/tests/functional-tests/17-ontology-changes.py b/tests/functional-tests/17-ontology-changes.py
index defa1e431..50cd08eba 100644
--- a/tests/functional-tests/17-ontology-changes.py
+++ b/tests/functional-tests/17-ontology-changes.py
@@ -22,7 +22,11 @@ Stand-alone tests cases for the store, booting it with different ontology
 changes and checking if the data is still there.
 """
 
+import gi
+gi.require_version('Tracker', '3.0')
 from gi.repository import GLib
+from gi.repository import Gio
+from gi.repository import Tracker
 
 import os
 import pathlib
@@ -67,38 +71,33 @@ class OntologyChangeTestTemplate (ut.TestCase):
     def tearDown(self):
         shutil.rmtree(self.tmpdir, ignore_errors=True)
 
-    def get_ontology_dir(self, param):
-        return str(pathlib.Path(__file__).parent.joinpath('test-ontologies', param))
+    def ontology_path(self, param):
+        return pathlib.Path(__file__).parent.joinpath('test-ontologies', param)
 
     def template_test_ontology_change(self):
         self.set_ontology_dirs()
 
         self.__assert_ontology_dates(self.FIRST_ONTOLOGY_DIR, self.SECOND_ONTOLOGY_DIR)
 
-        extra_env = cfg.test_environment(self.tmpdir)
-        extra_env['LANG'] = 'en_GB.utf8'
-        extra_env['LC_COLLATE'] = 'en_GB.utf8'
-        extra_env['TRACKER_DB_ONTOLOGIES_DIR'] = self.get_ontology_dir(self.FIRST_ONTOLOGY_DIR)
-
-        sandbox1 = trackertestutils.helpers.TrackerDBusSandbox(
-            cfg.TEST_DBUS_DAEMON_CONFIG_FILE, extra_env=extra_env)
-        sandbox1.start()
-
-        self.tracker = trackertestutils.helpers.StoreHelper(sandbox1.get_connection())
-        self.tracker.start_and_wait_for_ready()
+        # Create a local store with the first set of ontologies.
+        conn1 = Tracker.SparqlConnection.new(
+            Tracker.SparqlConnectionFlags.NONE,
+            Gio.File.new_for_path(self.tmpdir),
+            Gio.File.new_for_path(str(self.ontology_path(self.FIRST_ONTOLOGY_DIR))),
+            None)
 
+        self.tracker = trackertestutils.helpers.StoreHelper(conn1)
         self.insert_data()
 
-        sandbox1.stop()
-
-        # Boot the second set of ontologies
-        extra_env['TRACKER_DB_ONTOLOGIES_DIR'] = self.get_ontology_dir(self.SECOND_ONTOLOGY_DIR)
-        sandbox2 = trackertestutils.helpers.TrackerDBusSandbox(
-            cfg.TEST_DBUS_DAEMON_CONFIG_FILE, extra_env=extra_env)
-        sandbox2.start()
+        conn1.close()
 
-        self.tracker = trackertestutils.helpers.StoreHelper(sandbox2.get_connection())
-        self.tracker.start_and_wait_for_ready()
+        # Reopen the local store with the second set of ontologies.
+        conn2 = Tracker.SparqlConnection.new(
+            Tracker.SparqlConnectionFlags.NONE,
+            Gio.File.new_for_path(self.tmpdir),
+            Gio.File.new_for_path(str(self.ontology_path(self.SECOND_ONTOLOGY_DIR))),
+            None)
+        self.tracker = trackertestutils.helpers.StoreHelper(conn2)
 
         self.validate_status()
 
@@ -168,9 +167,9 @@ class OntologyChangeTestTemplate (ut.TestCase):
                         break
 
         first_date = get_ontology_date(
-            os.path.join(self.get_ontology_dir(first), "91-test.ontology"))
+            self.ontology_path(first).joinpath("91-test.ontology"))
         second_date = get_ontology_date(
-            os.path.join(self.get_ontology_dir(second), "91-test.ontology"))
+            self.ontology_path(second).joinpath("91-test.ontology"))
         if first_date >= second_date:
             self.fail("nao:modifiedTime in '%s' is not more recent in the second ontology" % (
                 "91-test.ontology"))
diff --git a/tests/functional-tests/meson.build b/tests/functional-tests/meson.build
index cd9b27c32..4a2d99983 100644
--- a/tests/functional-tests/meson.build
+++ b/tests/functional-tests/meson.build
@@ -31,7 +31,6 @@ functional_tests = [
   '08-unique-insertions',
   '09-concurrent-query',
   '14-signals',
-  '15-statistics',
   '16-collation',
   '17-ontology-changes',
 ]
@@ -42,6 +41,8 @@ endif
 
 test_env = environment()
 test_env.prepend('PYTHONPATH', tracker_uninstalled_testutils_dir)
+test_env.prepend('GI_TYPELIB_PATH', tracker_sparql_uninstalled_dir)
+test_env.prepend('LD_LIBRARY_PATH', tracker_sparql_uninstalled_dir)
 test_env.set('TRACKER_FUNCTIONAL_TEST_CONFIG', config_json_full_path)
 
 foreach t: functional_tests
diff --git a/tests/functional-tests/storetest.py b/tests/functional-tests/storetest.py
index d96294cc4..6f645810a 100644
--- a/tests/functional-tests/storetest.py
+++ b/tests/functional-tests/storetest.py
@@ -18,6 +18,11 @@
 # 02110-1301, USA.
 #
 
+import gi
+gi.require_version('Tracker', '3.0')
+from gi.repository import Gio
+from gi.repository import Tracker
+
 import os
 import shutil
 import tempfile
@@ -40,25 +45,16 @@ class CommonTrackerStoreTest (ut.TestCase):
         self.tmpdir = tempfile.mkdtemp(prefix='tracker-test-')
 
         try:
-            extra_env = cfg.test_environment(self.tmpdir)
-            extra_env['LANG'] = 'en_GB.utf8'
-            extra_env['LC_COLLATE'] = 'en_GB.utf8'
-
-            self.sandbox = trackertestutils.helpers.TrackerDBusSandbox(
-                dbus_daemon_config_file=cfg.TEST_DBUS_DAEMON_CONFIG_FILE, extra_env=extra_env)
-            self.sandbox.start()
+            self.conn = Tracker.SparqlConnection.new(
+                Tracker.SparqlConnectionFlags.NONE,
+                Gio.File.new_for_path(self.tmpdir), None, None)
 
-            self.tracker = trackertestutils.helpers.StoreHelper(
-                self.sandbox.get_connection())
-            self.tracker.start_and_wait_for_ready()
-            self.tracker.start_watching_updates()
+            self.tracker = trackertestutils.helpers.StoreHelper(self.conn)
         except Exception as e:
             shutil.rmtree(self.tmpdir, ignore_errors=True)
             raise
 
     @classmethod
     def tearDownClass(self):
-        self.tracker.stop_watching_updates()
-        self.sandbox.stop()
-
+        self.conn.close()
         shutil.rmtree(self.tmpdir, ignore_errors=True)
diff --git a/utils/trackertestutils/helpers.py b/utils/trackertestutils/helpers.py
index 037eb032c..73859b178 100644
--- a/utils/trackertestutils/helpers.py
+++ b/utils/trackertestutils/helpers.py
@@ -18,6 +18,9 @@
 # 02110-1301, USA.
 #
 
+import gi
+gi.require_version('Tracker', '3.0')
+from gi.repository import Tracker
 from gi.repository import Gio
 from gi.repository import GLib
 
@@ -34,7 +37,7 @@ from . import psutil_mini as psutil
 log = logging.getLogger(__name__)
 
 
-class GraphUpdateTimeoutException(RuntimeError):
+class EventTimeoutException(RuntimeError):
     pass
 
 
@@ -59,115 +62,37 @@ atexit.register(_cleanup_processes)
 
 class StoreHelper():
     """
-    Helper for testing the tracker-store daemon.
+    Helper for testing database access with libtracker-sparql.
     """
 
-    TRACKER_BUSNAME = 'org.freedesktop.Tracker1'
-    TRACKER_OBJ_PATH = '/org/freedesktop/Tracker1/Resources'
-    RESOURCES_IFACE = "org.freedesktop.Tracker1.Resources"
-
-    TRACKER_BACKUP_OBJ_PATH = "/org/freedesktop/Tracker1/Backup"
-    BACKUP_IFACE = "org.freedesktop.Tracker1.Backup"
-
-    TRACKER_STATS_OBJ_PATH = "/org/freedesktop/Tracker1/Statistics"
-    STATS_IFACE = "org.freedesktop.Tracker1.Statistics"
-
-    TRACKER_STATUS_OBJ_PATH = "/org/freedesktop/Tracker1/Status"
-    STATUS_IFACE = "org.freedesktop.Tracker1.Status"
-
-    def __init__(self, dbus_connection):
+    def __init__(self, conn):
         self.log = logging.getLogger(__name__)
         self.loop = mainloop.MainLoop()
 
-        self.bus = dbus_connection
-        self.graph_updated_handler_id = 0
-
-        self.resources = Gio.DBusProxy.new_sync(
-            self.bus, Gio.DBusProxyFlags.DO_NOT_AUTO_START_AT_CONSTRUCTION, None,
-            self.TRACKER_BUSNAME, self.TRACKER_OBJ_PATH, self.RESOURCES_IFACE)
-
-        self.backup_iface = Gio.DBusProxy.new_sync(
-            self.bus, Gio.DBusProxyFlags.DO_NOT_AUTO_START_AT_CONSTRUCTION, None,
-            self.TRACKER_BUSNAME, self.TRACKER_BACKUP_OBJ_PATH, self.BACKUP_IFACE)
-
-        self.stats_iface = Gio.DBusProxy.new_sync(
-            self.bus, Gio.DBusProxyFlags.DO_NOT_AUTO_START_AT_CONSTRUCTION, None,
-            self.TRACKER_BUSNAME, self.TRACKER_STATS_OBJ_PATH, self.STATS_IFACE)
-
-        self.status_iface = Gio.DBusProxy.new_sync(
-            self.bus, Gio.DBusProxyFlags.DO_NOT_AUTO_START_AT_CONSTRUCTION, None,
-            self.TRACKER_BUSNAME, self.TRACKER_STATUS_OBJ_PATH, self.STATUS_IFACE)
-
-    def start_and_wait_for_ready(self):
-        # The daemon is autostarted as soon as a method is called.
-        #
-        # We set a big timeout to avoid interfering when a daemon is being
-        # interactively debugged.
-        self.log.debug("Calling %s.Wait() method", self.STATUS_IFACE)
-        self.status_iface.call_sync('Wait', None, Gio.DBusCallFlags.NONE, 1000000, None)
-        self.log.debug("Ready")
+        self.conn = conn
+        self.notifier = None
 
     def start_watching_updates(self):
-        assert self.graph_updated_handler_id == 0
-
-        self.reset_graph_updates_tracking()
-
-        def signal_handler(proxy, sender_name, signal_name, parameters):
-            if signal_name == 'GraphUpdated':
-                self._graph_updated_cb(*parameters.unpack())
-
-        self.graph_updated_handler_id = self.resources.connect(
-            'g-signal', signal_handler)
-        self.log.debug("Watching for updates from Resources interface")
+        assert self.notifier is None
+        self.notifier = self.conn.create_notifier(Tracker.NotifierFlags.NONE)
+        self.notifier.connect('events', self._notifier_events_cb)
+        self.log.debug("Watching for updates from TrackerNotifier interface")
 
     def stop_watching_updates(self):
-        if self.graph_updated_handler_id != 0:
+        if self.notifier:
+            self.notifier = None
             self.log.debug("No longer watching for updates from Resources interface")
-            self.resources.disconnect(self.graph_updated_handler_id)
-            self.graph_updated_handler_id = 0
-
-    # A system to follow GraphUpdated and make sure all changes are tracked.
-    # This code saves every change notification received, and exposes methods
-    # to await insertion or deletion of a certain resource which first check
-    # the list of events already received and wait for more if the event has
-    # not yet happened.
-
-    def reset_graph_updates_tracking(self):
-        self.class_to_track = None
-        self.inserts_list = []
-        self.deletes_list = []
-        self.inserts_match_function = None
-        self.deletes_match_function = None
-
-    def _graph_updated_timeout_cb(self):
-        raise GraphUpdateTimeoutException()
-
-    def _graph_updated_cb(self, class_name, deletes_list, inserts_list):
+
+    def _notifier_events_timeout_cb(self):
+        raise EventTimeoutException()
+
+    def _notifier_events_cb(self, notifier, service, graph, events):
         """
-        Process notifications from tracker-store on resource changes.
+        Process notifications from store on resource changes.
         """
-        exit_loop = False
-
-        if class_name == self.class_to_track:
-            self.log.debug("GraphUpdated for %s: %i deletes, %i inserts", class_name, len(deletes_list), len(inserts_list))
-
-            if inserts_list is not None:
-                if self.inserts_match_function is not None:
-                    # The match function will remove matched entries from the list
-                    (exit_loop, inserts_list) = self.inserts_match_function(inserts_list)
-                self.inserts_list += inserts_list
-
-            if not exit_loop and deletes_list is not None:
-                if self.deletes_match_function is not None:
-                    (exit_loop, deletes_list) = self.deletes_match_function(deletes_list)
-                self.deletes_list += deletes_list
-
-            if exit_loop:
-                GLib.source_remove(self.graph_updated_timeout_id)
-                self.graph_updated_timeout_id = 0
-                self.loop.quit()
-        else:
-            self.log.debug("Ignoring GraphUpdated for class %s, currently tracking %s", class_name, self.class_to_track)
+
+        self.log.debug("Got %i events from %s, %s", len(events), service, graph)
+        # FIXME: This needs to change completely.
 
     def _enable_await_timeout(self):
         self.graph_updated_timeout_id = GLib.timeout_add_seconds(REASONABLE_TIMEOUT,
@@ -177,14 +102,6 @@ class StoreHelper():
         """
         Block until a resource matching the parameters becomes available
         """
-        assert (self.inserts_match_function == None)
-        assert (self.class_to_track == None), "Already waiting for resource of type %s" % self.class_to_track
-        assert (self.graph_updated_handler_id != 0), "You must call start_watching_updates() first."
-
-        self.class_to_track = rdf_class
-
-        self.matched_resource_urn = None
-        self.matched_resource_id = None
 
         self.log.debug("Await new %s (%i existing inserts)", rdf_class, len(self.inserts_list))
 
@@ -237,69 +154,12 @@ class StoreHelper():
             matched = matched_creation if required_property is None else matched_required_property
             return matched, remaining_events
 
-        def match_cb(inserts_list):
-            matched, remaining_events = find_resource_insertion(inserts_list)
-            exit_loop = matched
-            return exit_loop, remaining_events
-
-        # Check the list of previously received events for matches
-        (existing_match, self.inserts_list) = find_resource_insertion(self.inserts_list)
-
-        if not existing_match:
-            self._enable_await_timeout()
-            self.inserts_match_function = match_cb
-            # Run the event loop until the correct notification arrives
-            try:
-                self.loop.run_checked()
-            except GraphUpdateTimeoutException:
-                raise GraphUpdateTimeoutException("Timeout waiting for resource: class %s, URL %s, title %s" % (rdf_class, url, title)) from None
-            self.inserts_match_function = None
-
-        self.class_to_track = None
-        return (self.matched_resource_id, self.matched_resource_urn)
+        return (None, None)
 
     def await_resource_deleted(self, rdf_class, id):
         """
         Block until we are notified of a resources deletion
         """
-        assert (self.deletes_match_function == None)
-        assert (self.class_to_track == None)
-        assert (self.graph_updated_handler_id != 0), "You must call start_watching_updates() first."
-
-        def find_resource_deletion(deletes_list):
-            self.log.debug("find_resource_deletion: looking for %i in %s", id, deletes_list)
-
-            matched = False
-            remaining_events = []
-
-            for delete in deletes_list:
-                if delete[1] == id:
-                    matched = True
-                else:
-                    remaining_events += [delete]
-
-            return matched, remaining_events
-
-        def match_cb(deletes_list):
-            matched, remaining_events = find_resource_deletion(deletes_list)
-            exit_loop = matched
-            return exit_loop, remaining_events
-
-        self.log.debug("Await deletion of %i (%i existing)", id, len(self.deletes_list))
-
-        (existing_match, self.deletes_list) = find_resource_deletion(self.deletes_list)
-
-        if not existing_match:
-            self._enable_await_timeout()
-            self.class_to_track = rdf_class
-            self.deletes_match_function = match_cb
-            # Run the event loop until the correct notification arrives
-            try:
-                self.loop.run_checked()
-            except GraphUpdateTimeoutException as e:
-                raise GraphUpdateTimeoutException("Resource %i has not been deleted." % id) from e
-            self.deletes_match_function = None
-            self.class_to_track = None
 
         return
 
@@ -307,64 +167,19 @@ class StoreHelper():
         """
         Block until a property of a resource is updated or inserted.
         """
-        assert (self.inserts_match_function == None)
-        assert (self.deletes_match_function == None)
-        assert (self.class_to_track == None)
-        assert (self.graph_updated_handler_id != 0), "You must call start_watching_updates() first."
-
-        self.log.debug("Await change to %i %s (%i, %i existing)", subject_id, property_uri, len(self.inserts_list), len(self.deletes_list))
-
-        self.class_to_track = rdf_class
-
-        property_id = self.get_resource_id_by_uri(property_uri)
-
-        def find_property_change(event_list):
-            matched = False
-            remaining_events = []
-
-            for event in event_list:
-                if event[1] == subject_id and event[2] == property_id:
-                    self.log.debug("Matched property change: %s", str(event))
-                    matched = True
-                else:
-                    remaining_events += [event]
-
-            return matched, remaining_events
 
-        def match_cb(event_list):
-            matched, remaining_events = find_property_change(event_list)
-            exit_loop = matched
-            return exit_loop, remaining_events
-
-        # Check the list of previously received events for matches
-        (existing_match, self.inserts_list) = find_property_change(self.inserts_list)
-        if not existing_match:
-            (existing_match, self.deletes_list) = find_property_change(self.deletes_list)
-
-        if not existing_match:
-            self._enable_await_timeout()
-            self.inserts_match_function = match_cb
-            self.deletes_match_function = match_cb
-            # Run the event loop until the correct notification arrives
-            try:
-                self.loop.run_checked()
-            except GraphUpdateTimeoutException:
-                raise GraphUpdateTimeoutException(
-                    "Timeout waiting for property change, subject %i property %s (%i)" % (subject_id, property_uri, property_id))
-
-        self.inserts_match_function = None
-        self.deletes_match_function = None
-        self.class_to_track = None
-
-    # Note: The methods below call the tracker-store D-Bus API directly. This
-    # is useful for testing this API surface, but we recommand that all regular
-    # applications use libtracker-sparql library to talk to the database.
-
-    def query(self, query, **kwargs):
-        return self.resources.SparqlQuery('(s)', query, **kwargs)
-
-    def update(self, update_sparql, **kwargs):
-        return self.resources.SparqlUpdate('(s)', update_sparql, **kwargs)
+    def query(self, query):
+        cursor = self.conn.query(query, None)
+        result = []
+        while cursor.next():
+            row = []
+            for i in range(0, cursor.get_n_columns()):
+                row.append(cursor.get_string(i)[0])
+            result.append(row)
+        return result
+
+    def update(self, update_sparql):
+        self.conn.update(update_sparql, 0, None)
 
     def load(self, ttl_uri, **kwargs):
         return self.resources.Load('(s)', ttl_uri, **kwargs)
