[gnome-autoar/wip/oholy/compressor-hardlink-fixes: 3/3] compressor: Fix hardlink handling for new cpio format

commit 63fb6debb8c171a0b92fc35cf8d24e10335632df
Author: Ondrej Holy <oholy redhat com>
Date:   Wed Mar 24 16:16:51 2021 +0100

    compressor: Fix hardlink handling for new cpio format
    
    The new cpio format writes the file content for hardlinks only when
    the last link to an inode is seen. To achieve this, the archive
    entries are stored internally by `archive_entry_linkify` and written
    later. In that case, `archive_entry_linkify` takes ownership of the
    `archive_entry` struct and sets the pointer to `NULL`. However,
    gnome-autoar keeps using the original entry, which leads to weird
    errors. Also, after all archive entries have been written,
    `archive_entry_linkify` has to be called in a loop to write all
    deferred entries. However, it is currently called only once, which
    can cause some of the hardlinks not to be written to the archive.
    Let's correctly handle the entry ownership and call
    `archive_entry_linkify` in a loop to fix hardlink handling for the
    new cpio format.
    
    Relates: https://gitlab.gnome.org/GNOME/gnome-autoar/-/issues/25
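
For context, the per-entry pattern described above roughly corresponds to the
following libarchive sketch. This is not gnome-autoar code; the writer setup
and the helper name write_one_entry are illustrative only.

#include <archive.h>
#include <archive_entry.h>

/* 'a' is assumed to be an archive writer already configured for the new cpio
 * format, and 'resolver' a matching link resolver, e.g.:
 *
 *   resolver = archive_entry_linkresolver_new ();
 *   archive_entry_linkresolver_set_strategy (resolver, archive_format (a));
 */
static void
write_one_entry (struct archive                    *a,
                 struct archive_entry_linkresolver *resolver,
                 struct archive_entry             **entry)
{
  struct archive_entry *sparse = NULL;

  /* archive_entry_linkify may take ownership of *entry (setting it to NULL)
   * when the content has to be deferred until the last link to the inode is
   * seen. */
  archive_entry_linkify (resolver, entry, &sparse);

  if (*entry != NULL) {
    archive_write_header (a, *entry);
    /* ... archive_write_data() calls for the file content go here ... */
  } else {
    /* Ownership was taken by the resolver; allocate a fresh entry so the
     * caller can keep reusing the same pointer for the next file. */
    *entry = archive_entry_new ();
  }

  if (sparse != NULL) {
    /* A previously deferred entry is ready to be written as well. */
    archive_write_header (a, sparse);
  }
}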

 gnome-autoar/autoar-compressor.c | 31 +++++++++++++++++++------------
 1 file changed, 19 insertions(+), 12 deletions(-)
---
diff --git a/gnome-autoar/autoar-compressor.c b/gnome-autoar/autoar-compressor.c
index 87407b4..2929c83 100644
--- a/gnome-autoar/autoar-compressor.c
+++ b/gnome-autoar/autoar-compressor.c
@@ -963,9 +963,7 @@ autoar_compressor_do_add_to_archive (AutoarCompressor *self,
                        g_object_ref (file));
 
   {
-    struct archive_entry *entry, *sparse;
-
-    entry = self->entry;
+    struct archive_entry *sparse;
 
      /* Hardlinks are handled in different ways by the archive formats. The
      * archive_entry_linkify function is a unified interface, which handling
@@ -977,20 +975,20 @@ autoar_compressor_do_add_to_archive (AutoarCompressor *self,
     if (g_file_info_has_attribute (info, G_FILE_ATTRIBUTE_UNIX_DEVICE) &&
         g_file_info_has_attribute (info, G_FILE_ATTRIBUTE_UNIX_INODE) &&
         g_file_info_has_attribute (info, G_FILE_ATTRIBUTE_UNIX_NLINK))
-      archive_entry_linkify (self->resolver, &entry, &sparse);
+      archive_entry_linkify (self->resolver, &self->entry, &sparse);
 
-    if (entry != NULL) {
+    if (self->entry != NULL) {
       GFile *file_to_read;
       const char *pathname_in_entry;
-      pathname_in_entry = archive_entry_pathname (entry);
+      pathname_in_entry = archive_entry_pathname (self->entry);
       file_to_read = g_hash_table_lookup (self->pathname_to_g_file,
                                           pathname_in_entry);
-      autoar_compressor_do_write_data (self, entry, file_to_read);
+      autoar_compressor_do_write_data (self, self->entry, file_to_read);
       /* Entries for non-regular files might have their size attribute
        * different to their actual size on the disk
        */
-      if (archive_entry_filetype (entry) != AE_IFREG &&
-          archive_entry_size (entry) != g_file_info_get_size (info)) {
+      if (archive_entry_filetype (self->entry) != AE_IFREG &&
+          archive_entry_size (self->entry) != g_file_info_get_size (info)) {
         self->completed_size += g_file_info_get_size (info);
         autoar_compressor_signal_progress (self);
       }
@@ -998,12 +996,16 @@ autoar_compressor_do_add_to_archive (AutoarCompressor *self,
       g_hash_table_remove (self->pathname_to_g_file, pathname_in_entry);
       /* We have registered g_object_unref function to free the GFile object,
        * so we do not have to unref it here. */
+    } else {
+      /* The archive_entry_linkify function took ownership of our entry, so a
+       * new one has to be allocated here to avoid crashing on the next file. */
+      self->entry = archive_entry_new ();
     }
 
     if (sparse != NULL) {
       GFile *file_to_read;
       const char *pathname_in_entry;
-      pathname_in_entry = archive_entry_pathname (entry);
+      pathname_in_entry = archive_entry_pathname (self->entry);
       file_to_read = g_hash_table_lookup (self->pathname_to_g_file,
                                           pathname_in_entry);
       autoar_compressor_do_write_data (self, sparse, file_to_read);
@@ -1515,20 +1517,25 @@ autoar_compressor_step_create (AutoarCompressor *self)
       return;
   }
 
-  /* Process the final entry */
+  /* Flush deferred entries, if any, by calling linkify with entry unset. */
   {
     struct archive_entry *entry, *sparse;
     entry = NULL;
     archive_entry_linkify (self->resolver, &entry, &sparse);
-    if (entry != NULL) {
+    while (entry != NULL) {
       GFile *file_to_read;
       const char *pathname_in_entry;
+
       pathname_in_entry = archive_entry_pathname (entry);
       file_to_read = g_hash_table_lookup (self->pathname_to_g_file,
                                           pathname_in_entry);
       autoar_compressor_do_write_data (self, entry, file_to_read);
       /* I think we do not have to remove the entry in the hash table now
        * because we are going to free the entire hash table. */
+
+      /* The archive_entry is freed by the archive_entry_linkify function. */
+      entry = NULL;
+      archive_entry_linkify (self->resolver, &entry, &sparse);
     }
   }
 }
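
After the last file has been added, the second hunk drains the resolver. A
minimal sketch of that drain loop, using the same headers and the same
hypothetical writer and resolver as the sketch above:

/* Passing a NULL entry asks the resolver for the next deferred entry; keep
 * asking until nothing is left, otherwise some hardlinks would be missing
 * from the archive.  (Cleanup of the returned entries is elided here.) */
static void
flush_deferred_entries (struct archive                    *a,
                        struct archive_entry_linkresolver *resolver)
{
  struct archive_entry *entry = NULL;
  struct archive_entry *sparse = NULL;

  archive_entry_linkify (resolver, &entry, &sparse);
  while (entry != NULL) {
    archive_write_header (a, entry);
    /* ... archive_write_data() calls for the deferred content go here ... */

    entry = NULL;
    archive_entry_linkify (resolver, &entry, &sparse);
  }
}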

