[nautilus] file-operations: Fix progress of copies over gvfs



commit 9b8603da7b65c52f91ba571b47c290e7116b2e78
Author: rpm-build <rpm-build>
Date:   Thu Nov 24 10:36:13 2016 +0100

    file-operations: Fix progress of copies over gvfs
    
    commit 24dae9e0a "file-operations: avoid scanning the same files"
    added a way to skip files which have already been processed
    in scan_dir(). To do that, the code uses G_FILE_ATTRIBUTE_ID_FILE,
    which does not seem to be widely implemented by gvfs modules.
    
    When the file has no ID, it will not be accounted for in the SourceInfo
    structure used to track copy progress, resulting in progress not being
    shown, and Nautilus saying that it's copying 0 files.
    
    This commit changes the logic to use the file URI instead of the file ID
    to check whether the file has already been processed.

 src/nautilus-file-operations.c |   27 ++++++++++++---------------
 1 files changed, 12 insertions(+), 15 deletions(-)
---
diff --git a/src/nautilus-file-operations.c b/src/nautilus-file-operations.c
index 52e0a1b..d66c4c5 100644
--- a/src/nautilus-file-operations.c
+++ b/src/nautilus-file-operations.c
@@ -3161,7 +3161,6 @@ scan_dir (GFile      *dir,
 retry:
     error = NULL;
     enumerator = g_file_enumerate_children (dir,
-                                            G_FILE_ATTRIBUTE_ID_FILE ","
                                             G_FILE_ATTRIBUTE_STANDARD_NAME ","
                                             G_FILE_ATTRIBUTE_STANDARD_TYPE ","
                                             G_FILE_ATTRIBUTE_STANDARD_SIZE,
@@ -3173,14 +3172,16 @@ retry:
         error = NULL;
         while ((info = g_file_enumerator_next_file (enumerator, job->cancellable, &error)) != NULL)
         {
-            const char *file_id;
+            g_autoptr (GFile) file = NULL;
+            g_autofree char *file_uri = NULL;
 
-            file_id = g_file_info_get_attribute_string (info,
-                                                        G_FILE_ATTRIBUTE_ID_FILE);
+            file = g_file_enumerator_get_child (enumerator, info);
+            file_uri = g_file_get_uri (file);
+            g_warn_if_fail (file_uri != NULL);
 
-            if (file_id && !g_hash_table_contains (scanned, file_id))
+            if (!g_hash_table_contains (scanned, file_uri))
             {
-                g_hash_table_add (scanned, g_strdup (file_id));
+                g_hash_table_add (scanned, g_strdup (file_uri));
 
                 count_file (info, job, source_info);
 
@@ -3193,7 +3194,6 @@ retry:
                     g_queue_push_head (dirs, subdir);
                 }
             }
-
             g_object_unref (info);
         }
         g_file_enumerator_close (enumerator, job->cancellable, NULL);
@@ -3328,7 +3328,6 @@ scan_file (GFile      *file,
 retry:
     error = NULL;
     info = g_file_query_info (file,
-                              G_FILE_ATTRIBUTE_ID_FILE ","
                               G_FILE_ATTRIBUTE_STANDARD_TYPE ","
                               G_FILE_ATTRIBUTE_STANDARD_SIZE,
                               G_FILE_QUERY_INFO_NOFOLLOW_SYMLINKS,
@@ -3337,14 +3336,13 @@ retry:
 
     if (info)
     {
-        const char *file_id;
-
-        file_id = g_file_info_get_attribute_string (info,
-                                                    G_FILE_ATTRIBUTE_ID_FILE);
+        g_autofree char *file_uri = NULL;
 
-        if (file_id && !g_hash_table_contains (scanned, file_id))
+        file_uri = g_file_get_uri (file);
+        g_warn_if_fail (file_uri != NULL);
+        if (!g_hash_table_contains (scanned, file_uri))
         {
-            g_hash_table_add (scanned, g_strdup (file_id));
+            g_hash_table_add (scanned, g_strdup (file_uri));
 
             count_file (info, job, source_info);
 
@@ -3355,7 +3353,6 @@ retry:
                 g_queue_push_head (dirs, g_object_ref (file));
             }
         }
-
         g_object_unref (info);
     }
     else if (job->skip_all_error)


[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]