[meld] Add auto-merge mode and supporting classes



commit 09496e25fa118bbdd6bcfcc9912a0a9f1077bd04
Author: Piotr Piastucki <the_leech users berlios de>
Date:   Wed Nov 25 22:15:41 2009 +0100

    Add auto-merge mode and supporting classes
    
    This patch adds support for 'auto-merge with ancestor' mode, allowing
    certain conflicts to be automatically resolved. This is the major step
    in closing bgo#578613. This commit introduces three new classes:
    
    matchers.MyersSequenceMatcher provides an optimized implementation of
    the improved Myers algorithm described by S. Wu, U. Manber, G. Myers
    and W. Miller in "An O(NP) Sequence Comparison Algorithm"
    (http://research.janelia.org/myers/Papers/np_diff.pdf). The algorithm
    was selected because it is considered to be the best general-purpose
    diff algorithm. Unlike MyersSequenceMatcher, difflib.SequenceMatcher
    may not always return the longest common subsequence (LCS) due to its
    underlying algorithm and aggressive performance optimizations. As a
    result the number of non-matching lines may be higher, which in turn
    may lead to more conflicts, although such a suboptimal edit script may
    look more pleasing. A similar issue affects the patience diff
    algorithm (bzr), which ignores non-unique matching lines and thus
    tends to report false conflicts.
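    
    As a rough illustration of the interface (hypothetical inputs; the
    class is meant as a drop-in for difflib.SequenceMatcher, and only
    get_difference_opcodes() is specific to this patch):
    
        from meld import matchers
        
        a = "the quick brown fox".split()
        b = "the slow brown dog".split()
        sm = matchers.MyersSequenceMatcher(None, a, b)
        # same (i, j, size) matching-block triples difflib would return
        print sm.get_matching_blocks()   # [(0, 0, 1), (2, 2, 1), (4, 4, 0)]
        # opcodes with the "equal" chunks filtered out
        print sm.get_difference_opcodes()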
    
    The merge.Merger class extends diffutil.Differ so that it can be used
    as a drop-in replacement for Differ in FileMerge/FileDiff. The class
    provides all the auto-merge logic, including improved automated
    conflict-resolution rules, tracking of unresolved merge conflicts,
    and a merge_file() method that merges the changes from two diffs into
    a single file. More sophisticated rules may be added to Merger in the
    future to make the tool smarter. By default, Merger uses
    MyersSequenceMatcher to handle all diff calculations, including diff
    highlighting.
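    
    Condensed from FileMerge._merge_files() below, driving the merge looks
    roughly like this ("filtered" and "raw" stand for the three panes'
    line lists, with and without text filters applied):
    
        merger = merge.Merger()
        step = merger.set_sequences_iter(filtered)
        while step.next() is None:      # diffs are computed incrementally
            pass
        merged = None
        for merged in merger.merge_file(filtered, raw):
            pass                        # yields None while merging; the
                                        # merged text is the final value
        conflicts = merger.get_unresolved_count()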
    
    filemerge.FileMerge overrides some FileDiff methods to change the
    behaviour of the UI in auto-merge mode. A small hack with a hidden text
    buffer allows us to re-use the existing file loading functionality to
    load the common ancestor file without showing it to the user.

 meld/filemerge.py |  131 +++++++++++++++++++++++++++
 meld/matchers.py  |  257 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 meld/merge.py     |  234 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 622 insertions(+), 0 deletions(-)
---
diff --git a/meld/filemerge.py b/meld/filemerge.py
new file mode 100644
index 0000000..ff6a8f9
--- /dev/null
+++ b/meld/filemerge.py
@@ -0,0 +1,131 @@
+### Copyright (C) 2009 Piotr Piastucki <the_leech users berlios de>
+
+### This program is free software; you can redistribute it and/or modify
+### it under the terms of the GNU General Public License as published by
+### the Free Software Foundation; either version 2 of the License, or
+### (at your option) any later version.
+
+### This program is distributed in the hope that it will be useful,
+### but WITHOUT ANY WARRANTY; without even the implied warranty of
+### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+### GNU General Public License for more details.
+
+### You should have received a copy of the GNU General Public License
+### along with this program; if not, write to the Free Software
+### Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+import filediff
+from gettext import gettext as _
+import gtk
+import merge
+
+MASK_SHIFT, MASK_CTRL = 1, 2
+
+
+class FileMerge(filediff.FileDiff):
+
+    def __init__(self, prefs, num_panes):
+        filediff.FileDiff.__init__(self, prefs, num_panes)
+        self.linediffer = merge.Merger()
+        self.hidden_textbuffer = gtk.TextBuffer()
+
+    def _connect_buffer_handlers(self):
+        filediff.FileDiff._connect_buffer_handlers(self)
+        self.textview[0].set_editable(0)
+        self.textview[2].set_editable(0)
+
+    def set_files(self, files):
+        if len(files) == 4:
+            self.ancestor_file = files[1]
+            self.merge_file = files[3]
+            files[1] = files[3]
+            files = files[:3]
+        filediff.FileDiff.set_files(self, files)
+
+    def _set_files_internal(self, files):
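+        # Swap the hidden buffer in for pane 1 so the stock loader reads
+        # the common ancestor without it ever being displayed.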
+        panetext = ["\n"] * len(files)
+        textbuffers = self.textbuffer[:]
+        textbuffers[1] = self.hidden_textbuffer
+        files[1] = self.ancestor_file
+        for i in self._load_files(files, textbuffers, panetext):
+            yield i
+        for i in self._merge_files(panetext):
+            yield i
+        for i in self._diff_files(files, panetext):
+            yield i
+
+    def _get_custom_status_text(self):
+        return "   Conflicts: %i" % (self.linediffer.get_unresolved_count())
+
+    def set_buffer_writable(self, buf, yesno):
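+        # The hidden ancestor buffer has no pane of its own; treat it as
+        # the merged (middle) pane, which must stay writable.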
+        if buf == self.hidden_textbuffer:
+            buf = self.textbuffer[1]
+            yesno = True
+        pane = self.textbuffer.index(buf)
+        self.bufferdata[pane].writable = yesno
+        self.recompute_label()
+
+    def _merge_files(self, panetext):
+        yield _("[%s] Computing differences") % self.label_text
+        lines = map(lambda x: x.split("\n"), panetext)
+        filteredpanetext = [self._filter_text(p) for p in panetext]
+        filteredlines = map(lambda x: x.split("\n"), filteredpanetext)
+        merger = merge.Merger()
+        step = merger.set_sequences_iter(filteredlines)
+        while step.next() is None:
+            yield 1
+        yield _("[%s] Merging files") % self.label_text
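+        # merge_file() yields None while it works; its final yield is the
+        # merged text, which the loop below leaves in panetext[1].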
+        for panetext[1] in merger.merge_file(filteredlines, lines):
+            yield 1
+        self.linediffer.unresolved = merger.unresolved
+        self.textbuffer[1].insert(self.textbuffer[1].get_end_iter(), panetext[1])
+        self.bufferdata[1].modified = 1
+        self.recompute_label()
+        yield 1
+
+    def _linkmap_draw_icon(self, context, which, change, x, f0, t0):
+        pix0 = self.pixbuf_delete
+        if which:
+            if self.keymask & MASK_CTRL:
+                pix1 = self.pixbuf_copy1
+            else:
+                pix1 = self.pixbuf_apply1
+        else:
+            if self.keymask & MASK_CTRL:
+                pix1 = self.pixbuf_copy0
+            else:
+                pix1 = self.pixbuf_apply0
+
+        if which:
+            if change in ("delete",):
+                self.paint_pixbuf_at(context, pix0, 0, f0)
+            if change in ("insert", "replace", "conflict"):
+                self.paint_pixbuf_at(context, pix1, x, t0)
+        else:
+            if change in ("insert",):
+                self.paint_pixbuf_at(context, pix0, x, t0)
+            if change in ("delete", "replace", "conflict"):
+                self.paint_pixbuf_at(context, pix1, 0, f0)
+
+    def _linkmap_process_event(self, event, which, side, htotal, rect_x, pix_width, pix_height):
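+        # Find the chunk whose action icon lies under the mouse pointer
+        # and remember it as self.mouse_chunk.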
+        origsrc = which + side
+        src = 2 * which
+        dst = 1
+        srcadj = self.scrolledwindow[src].get_vadjustment()
+        dstadj = self.scrolledwindow[dst].get_vadjustment()
+        for c in self.linediffer.pair_changes(src, dst):
+            if c[0] == "insert":
+                if origsrc != 1:
+                    continue
+                h = self._line_to_pixel(dst, c[3]) - dstadj.value
+            else:
+                if origsrc == 1:
+                    continue
+                h = self._line_to_pixel(src, c[1]) - srcadj.value
+            if h < 0: # find first visible chunk
+                continue
+            elif h > htotal: # we've gone past last visible
+                break
+            elif h < event.y and event.y < h + pix_height:
+                self.mouse_chunk = ((src, dst), (rect_x, h, pix_width, pix_height), c)
+                break
diff --git a/meld/matchers.py b/meld/matchers.py
new file mode 100644
index 0000000..787ff9f
--- /dev/null
+++ b/meld/matchers.py
@@ -0,0 +1,257 @@
+### Copyright (C) 2009 Piotr Piastucki <the_leech users berlios de>
+
+### This program is free software; you can redistribute it and/or modify
+### it under the terms of the GNU General Public License as published by
+### the Free Software Foundation; either version 2 of the License, or
+### (at your option) any later version.
+
+### This program is distributed in the hope that it will be useful,
+### but WITHOUT ANY WARRANTY; without even the implied warranty of
+### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+### GNU General Public License for more details.
+
+### You should have received a copy of the GNU General Public License
+### along with this program; if not, write to the Free Software
+### Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+import difflib
+
+
+def find_common_prefix(a, b):
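+    # Binary search for the length of the longest common prefix of a and b.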
+    if a[0] == b[0]:
+        pointermax = min(len(a), len(b))
+        pointermid = pointermax
+        pointermin = 0
+        while pointermin < pointermid:
+            if a[pointermin:pointermid] == b[pointermin:pointermid]:
+                pointermin = pointermid
+            else:
+                pointermax = pointermid
+            pointermid = int((pointermax - pointermin) / 2 + pointermin)
+        return pointermid
+    return 0
+
+
+def find_common_suffix(a, b):
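+    # Binary search for the length of the longest common suffix of a and b.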
+    if a[-1] == b[-1]:
+        pointermax = min(len(a), len(b))
+        pointermid = pointermax
+        pointermin = 0
+        while pointermin < pointermid:
+            if (a[-pointermid:len(a) - pointermin] == b[-pointermid:len(b) - pointermin]):
+                pointermin = pointermid
+            else:
+                pointermax = pointermid
+            pointermid = int((pointermax - pointermin) / 2 + pointermin)
+        return pointermid
+    return 0
+
+
+class MyersSequenceMatcher(difflib.SequenceMatcher):
+
+    def __init__(self, isjunk=None, a="", b=""):
+        if isjunk is not None:
+            raise NotImplementedError('isjunk is not supported yet')
+        self.a = a
+        self.b = b
+        self.matching_blocks = self.opcodes = None
+        # perf optimization switches
+        self.discard_lines = True
+        self.find_prefix_suffix = True
+        # fields needed by the preprocessor so that preprocessing may be
+        # shared by more than one LCS algorithm
+        self.aindex = {}
+        self.bindex = {}
+        self.common_prefix = self.common_suffix = 0
+        self.lines_discarded = False
+
+    def get_matching_blocks(self):
+        if self.matching_blocks is None:
+            for i in self.initialise():
+                pass
+        return self.matching_blocks
+
+    def get_difference_opcodes(self):
+        return filter(lambda x: x[0] != "equal", self.get_opcodes())
+
+    def preprocess(self):
+        """
+        Pre-processing optimizations:
+        1) remove common prefix and common suffix
+        2) remove lines that do not match
+        """
+        a = self.a
+        b = self.b
+        aindex = self.aindex = {}
+        bindex = self.bindex = {}
+        n = len(a)
+        m = len(b)
+        # remove common prefix and common suffix
+        self.common_prefix = self.common_suffix = 0
+        if self.find_prefix_suffix:
+            self.common_prefix = find_common_prefix(a, b)
+            if self.common_prefix > 0:
+                a = a[self.common_prefix:]
+                b = b[self.common_prefix:]
+                n -= self.common_prefix
+                m -= self.common_prefix
+
+            if n > 0 and m > 0:
+                self.common_suffix = find_common_suffix(a, b)
+                if self.common_suffix > 0:
+                    a = a[:n - self.common_suffix]
+                    b = b[:m - self.common_suffix]
+                    n -= self.common_suffix
+                    m -= self.common_suffix
+        # discard lines that do not match any line from the other file
+        if self.discard_lines and n > 0 and m > 0:
+            a2 = []
+            b2 = []
+            j = 0
+            for i, newline in enumerate(b):
+                if newline in a:
+                    b2.append(newline)
+                    bindex[j] = i
+                    j += 1
+            k = 0
+            for i, origline in enumerate(a):
+                if origline in b:
+                    a2.append(a[i])
+                    aindex[k] = i
+                    k += 1
+            self.lines_discarded = m - j > 10 or n - k > 10
+            if self.lines_discarded:
+                a = a2
+                b = b2
+        return (a, b)
+
+    def build_matching_blocks(self, lastsnake, snakes):
+        """
+        Build the list of matching blocks based on the snakes, taking into
+        consideration all preprocessing optimizations:
+        1) add separate blocks for common prefix and common suffix
+        2) shift positions and split blocks based on the list of discarded non-matching lines
+        """
+        self.matching_blocks = matching_blocks = []
+
+        common_prefix = self.common_prefix
+        common_suffix = self.common_suffix
+        aindex = self.aindex
+        bindex = self.bindex
+        while lastsnake is not None:
+            lastsnake, x, y, snake = snakes[lastsnake]
+            if self.lines_discarded:
+                # split snakes if needed because of discarded lines
+                x += snake - 1
+                y += snake - 1
+                xprev = aindex[x] + common_prefix
+                yprev = bindex[y] + common_prefix
+                if snake > 1:
+                    newsnake = 1
+                    for i in range(1, snake):
+                        x -= 1
+                        y -= 1
+                        xnext = aindex[x] + common_prefix
+                        ynext = bindex[y] + common_prefix
+                        if (xprev - xnext != 1) or (yprev - ynext != 1):
+                            matching_blocks.insert(0, (xprev, yprev, newsnake))
+                            newsnake = 0
+                        xprev = xnext
+                        yprev = ynext
+                        newsnake += 1
+                    matching_blocks.insert(0, (xprev, yprev, newsnake))
+                else:
+                    matching_blocks.insert(0, (xprev, yprev, snake))
+            else:
+                matching_blocks.insert(0, (x + common_prefix, y + common_prefix, snake))
+        if common_prefix:
+            matching_blocks.insert(0, (0, 0, common_prefix))
+        if common_suffix:
+            matching_blocks.append((len(self.a) - common_suffix, len(self.b) - common_suffix, common_suffix))
+        matching_blocks.append((len(self.a), len(self.b), 0))
+        # clean-up to free memory
+        self.aindex = self.bindex = None
+
+    def initialise(self):
+        """
+        Optimized implementation of the O(NP) algorithm described by Sun Wu, Udi Manber, Gene Myers and Webb Miller
+        ("An O(NP) Sequence Comparison Algorithm", 1989)
+        http://research.janelia.org/myers/Papers/np_diff.pdf
+        """
+
+        a, b = self.preprocess()
+        m = len(a)
+        n = len(b)
+        middle = m + 1
+        lastsnake = None
+        delta = n - m
+        dmin = min(0, delta)
+        dmax = max(0, delta)
+
+        snakes = []
+        if n > 0 and m > 0:
+            size = n + m + 2
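+            # fp[k + middle] holds the furthest-reaching y on diagonal
+            # k = y - x, plus the index of the last snake on that path.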
+            fp = [(-1, None)] * size
+            p = -1
+            while True:
+                p += 1
+                if not p % 100:
+                    yield None
+                # move along vertical edge
+                yv = -1
+                node = None
+                for k in range(dmin - p, delta, 1):
+                    km = k + middle
+                    if yv < fp[km + 1][0]:
+                        yv, node = fp[km + 1]
+                    else:
+                        yv += 1
+                    x = yv - k
+                    snake = 0
+                    while x < m and yv < n and a[x] == b[yv]:
+                        x += 1
+                        yv += 1
+                        snake += 1
+                    if snake:
+                        snakes.append((node, x - snake, yv - snake, snake))
+                        node = len(snakes) - 1
+                    fp[km] = (yv, node)
+                # move along horizontal edge
+                yh = -1
+                node = None
+                for k in range(dmax + p, delta, -1):
+                    km = k + middle
+                    if fp[km - 1][0] >= yh:
+                        yh, node = fp[km - 1]
+                        yh += 1
+                    x = yh - k
+                    snake = 0
+                    while x < m and yh < n and a[x] == b[yh]:
+                        x += 1
+                        yh += 1
+                        snake += 1
+                    if snake:
+                        snakes.append((node, x - snake, yh - snake, snake))
+                        node = len(snakes) - 1
+                    fp[km] = (yh, node)
+                # point on the diagonal that leads to the sink
+                km = delta + middle
+                if yv < yh:
+                    y, node = fp[km + 1]
+                else:
+                    y, node = fp[km - 1]
+                    y += 1
+                x = y - delta
+                snake = 0
+                while x < m and y < n and a[x] == b[y]:
+                    x += 1
+                    y += 1
+                    snake += 1
+                if snake:
+                    snakes.append((node, x - snake, y - snake, snake))
+                    node = len(snakes) - 1
+                fp[km] = (y, node)
+                if y >= n:
+                    lastsnake = node
+                    break
+        self.build_matching_blocks(lastsnake, snakes)
+        yield 1
diff --git a/meld/merge.py b/meld/merge.py
new file mode 100644
index 0000000..5f0e926
--- /dev/null
+++ b/meld/merge.py
@@ -0,0 +1,234 @@
+### Copyright (C) 2009 Piotr Piastucki <the_leech users berlios de>
+
+### This program is free software; you can redistribute it and/or modify
+### it under the terms of the GNU General Public License as published by
+### the Free Software Foundation; either version 2 of the License, or
+### (at your option) any later version.
+
+### This program is distributed in the hope that it will be useful,
+### but WITHOUT ANY WARRANTY; without even the implied warranty of
+### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+### GNU General Public License for more details.
+
+### You should have received a copy of the GNU General Public License
+### along with this program; if not, write to the Free Software
+### Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+import diffutil
+import matchers
+#from _patiencediff_py import PatienceSequenceMatcher_py as PatienceSequenceMatcher
+
+
+class Merger(diffutil.Differ):
+
+    _matcher = matchers.MyersSequenceMatcher
+    # _matcher = PatienceSequenceMatcher
+
+    def __init__(self):
+        diffutil.Differ.__init__(self)
+        self.auto_merge = False
+        self.unresolved = []
+
+    def _auto_merge(self, using, texts):
+        l0, h0, l1, h1, l2, h2 = self._merge_blocks(using)
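+        # l0:h0, l1:h1 and l2:h2 bound the change in texts[0], the common
+        # ancestor (texts[1]) and texts[2] respectively.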
+
+        if h0 - l0 == h2 - l2 and texts[0][l0:h0] == texts[2][l2:h2]:
+            # handle simple conflicts here (exact match)
+            if l1 != h1 and l0 == h0:
+                tag = "delete"
+            elif l1 != h1:
+                tag = "replace"
+            else:
+                tag = "insert"
+            out0 = (tag, l1, h1, l0, h0)
+            out1 = (tag, l1, h1, l2, h2)
+        else:
+            # here we will try to resolve more complex conflicts automatically... if possible
+            out0 = ('conflict', l1, h1, l0, h0)
+            out1 = ('conflict', l1, h1, l2, h2)
+            if self.auto_merge:
+                len0 = h0 - l0
+                len1 = h1 - l1
+                len2 = h2 - l2
+                if (len0 > 0 and len2 > 0) and (len0 == len1 or len2 == len1 or len1 == 0):
+                    matcher = self._matcher(None, texts[0][l0:h0], texts[2][l2:h2])
+                    for chunk in matcher.get_opcodes():
+                        s1 = l1
+                        e1 = l1
+                        if len0 == len1:
+                            s1 += chunk[1]
+                            e1 += chunk[2]
+                        elif len2 == len1:
+                            s1 += chunk[3]
+                            e1 += chunk[4]
+                        if chunk[0] == 'equal':
+                            out0 = ('replace', s1, e1, l0 + chunk[1], l0 + chunk[2])
+                            out1 = ('replace', s1, e1, l2 + chunk[3], l2 + chunk[4])
+                            yield out0, out1
+                        else:
+                            out0 = ('conflict', s1, e1, l0 + chunk[1], l0 + chunk[2])
+                            out1 = ('conflict', s1, e1, l2 + chunk[3], l2 + chunk[4])
+                            yield out0, out1
+                    return
+#                elif len0 > 0 and len2 > 0:
+                    # this logic will resolve more conflicts automatically, but unresolved conflicts may sometimes look confusing
+                    # as the line numbers in the ancestor file will be interpolated and may not reflect the actual changes
+#                    matcher = self._matcher(None, texts[0][l0:h0], texts[2][l2:h2])
+#                    if len0 > len2:
+#                        maxindex = 1
+#                        maxlen = len0
+#                    else:
+#                        maxindex = 3
+#                        maxlen = len2
+#                    for chunk in matcher.get_opcodes():
+#                        if chunk[0] == 'equal':
+#                            out0 = ('replace', l1 + len1 * chunk[maxindex] / maxlen, l1 + len1 * chunk[maxindex + 1] / maxlen, l0 + chunk[1], l0 + chunk[2])
+#                            out1 = ('replace', l1 + len1 * chunk[maxindex] / maxlen, l1 + len1 * chunk[maxindex + 1] / maxlen, l2 + chunk[3], l2 + chunk[4])
+#                            yield out0, out1
+#                        else:
+#                            out0 = ('conflict', l1 + len1 * chunk[maxindex] / maxlen, l1 + len1 * chunk[maxindex + 1] / maxlen, l0 + chunk[1], l0 + chunk[2])
+#                            out1 = ('conflict', l1 + len1 * chunk[maxindex] / maxlen, l1 + len1 * chunk[maxindex + 1] / maxlen, l2 + chunk[3], l2 + chunk[4])
+#                            yield out0, out1
+#                    return
+                else:
+                    # some tricks to resolve even more conflicts automatically
+                    # unfortunately the resulting chunks cannot be used to highlight changes
+                    # but hey, they are good enough to merge the resulting file :)
+                    chunktype = using[0][0][0]
+                    for chunkarr in using:
+                        for chunk in chunkarr:
+                            if chunk[0] != chunktype:
+                                chunktype = None
+                                break
+                        if not chunktype:
+                            break
+                    if chunktype == 'delete':
+                        # delete + delete (any length) -> split into delete/conflict
+                        seq0 = seq1 = None
+                        while True:
+                            if seq0 is None:
+                                try:
+                                    seq0 = using[0].pop(0)
+                                    i0 = seq0[1]
+                                    end0 = seq0[4]
+                                except IndexError:
+                                    break
+                            if seq1 is None:
+                                try:
+                                    seq1 = using[1].pop(0)
+                                    i1 = seq1[1]
+                                    end1 = seq1[4]
+                                except IndexError:
+                                    break
+                            highstart = max(i0, i1)
+                            if i0 != i1:
+                                out0 = ('conflict', i0 - highstart + i1, highstart, seq0[3] - highstart + i1, seq0[3])
+                                out1 = ('conflict', i1 - highstart + i0, highstart, seq1[3] - highstart + i0, seq1[3])
+                                yield out0, out1
+                            lowend = min(seq0[2], seq1[2])
+                            if highstart != lowend:
+                                out0 = ('delete', highstart, lowend, seq0[3], seq0[4])
+                                out1 = ('delete', highstart, lowend, seq1[3], seq1[4])
+                                yield out0, out1
+                            i0 = i1 = lowend
+                            if lowend == seq0[2]:
+                                seq0 = None
+                            if lowend == seq1[2]:
+                                seq1 = None
+
+                        if seq0:
+                            out0 = ('conflict', i0, seq0[2], seq0[3], seq0[4])
+                            out1 = ('conflict', i1, i1 + seq0[2] - i0, end1, end1 + seq0[2] - i0)
+                            yield out0, out1
+                        elif seq1:
+                            out0 = ('conflict', i0, i0 + seq1[2] - i1, end0, end0 + seq1[2] - i1)
+                            out1 = ('conflict', i1, seq1[2], seq1[3], seq1[4])
+                            yield out0, out1
+                        return
+        yield out0, out1
+
+    def change_sequence(self, sequence, startidx, sizechange, texts):
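+        # Sequence 1 is the merged (middle) buffer; keep the recorded
+        # unresolved-conflict line numbers in sync with edits made to it.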
+        if sequence == 1:
+            lo = 0
+            for c in self.unresolved:
+                if startidx <= c:
+                    break
+                lo += 1
+            if lo < len(self.unresolved):
+                hi = lo
+                if sizechange < 0:
+                    for c in self.unresolved[lo:]:
+                        if startidx - sizechange <= c:
+                            break
+                        hi += 1
+                elif sizechange == 0 and startidx == self.unresolved[lo]:
+                    hi += 1
+
+                if hi < len(self.unresolved):
+                    self.unresolved[hi:] = [c + sizechange for c in self.unresolved[hi:]]
+                self.unresolved[lo:hi] = []
+
+        return diffutil.Differ.change_sequence(self, sequence, startidx, sizechange, texts)
+
+    def get_unresolved_count(self):
+        return len(self.unresolved)
+
+    def _apply_change(self, text, change, mergedtext):
+        LO, HI = 1, 2
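+        # change is (tag, base_lo, base_hi, other_lo, other_hi), where the
+        # base indices refer to the common ancestor; the return value is
+        # the number of ancestor lines the change consumed.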
+        if change[0] == 'insert':
+            for i in range(change[LO + 2], change[HI + 2]):
+                mergedtext.append(text[i])
+            return 0
+        elif change[0] == 'replace':
+            for i in range(change[LO + 2], change[HI + 2]):
+                mergedtext.append(text[i])
+            return change[HI] - change[LO]
+        else:
+            return change[HI] - change[LO]
+
+    def merge_file(self, filteredtexts, texts):
+        LO, HI = 1, 2
+        self.auto_merge = True
+        self.unresolved = unresolved = []
+        diffs = self.diffs
+        lastline = 0
+        mergedline = 0
+        mergedtext = []
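+        # Walk the combined chunk stream, copying unchanged ancestor lines
+        # and applying non-conflicting changes from either side.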
+        for change in self._merge_diffs(diffs[0], diffs[1], filteredtexts):
+            yield None
+            low_mark = lastline
+            if change[0] is not None:
+                low_mark = change[0][LO]
+            if change[1] is not None:
+                if change[1][LO] > low_mark:
+                    low_mark = change[1][LO]
+            for i in range(lastline, low_mark, 1):
+                mergedtext.append(texts[1][i])
+            mergedline += low_mark - lastline
+            lastline = low_mark
+            if change[0] is not None and change[1] is not None and change[0][0] == 'conflict':
+                high_mark = max(change[0][HI], change[1][HI])
+                if low_mark < high_mark:
+                    for i in range(low_mark, high_mark):
+                        mergedtext.append("(??)" + texts[1][i])
+                        unresolved.append(mergedline)
+                        mergedline += 1
+                else:
+                    #conflictsize = min(1, max(change[0][HI + 2] - change[0][LO + 2], change[1][HI + 2] - change[1][LO + 2]))
+                    #for i in range(conflictsize):
+                    mergedtext.append("(??)")
+                    unresolved.append(mergedline)
+                    mergedline += 1
+                lastline = high_mark
+            elif change[0] is not None:
+                lastline += self._apply_change(texts[0], change[0], mergedtext)
+                mergedline += change[0][HI + 2] - change[0][LO + 2]
+            else:
+                lastline += self._apply_change(texts[2], change[1], mergedtext)
+                mergedline += change[1][HI + 2] - change[1][LO + 2]
+        baselen = len(texts[1])
+        for i in range(lastline, baselen, 1):
+            mergedtext.append(texts[1][i])
+
+        self.auto_merge = False
+        yield "\n".join(mergedtext)


