Changes On Branch better-merge

Changes In Branch better-merge Excluding Merge-Ins

This is equivalent to a diff from 2ec8a7ae38 to 4582d69e34

2024-12-05
20:12  When doing a merge, if a merge conflict occurs, a new section appears in the merge conflict markings that shows the suggested resolution. In addition, the "merge-info --tk" GUI is improved so that line numbers in the result column correspond with line numbers in the actual file. ... (check-in: 1cf0d372fa user: drh tags: trunk)
20:08  Improvements to the file selection option-menu in the merge UI. ... (Closed-Leaf check-in: 4582d69e34 user: drh tags: better-merge)
19:22  Improvements to the merge UI. Fix a mostly-harmless assertion associated with merge marks. ... (check-in: 48827897cf user: drh tags: better-merge)
12:15  Steps toward doing a better job of automatically resolving merge conflicts. Compiles but does not work. This is an incremental check-in. ... (check-in: 849c7eb6ca user: drh tags: better-merge)

2024-12-04
19:25  Fix rendering of merge information that contains "N" entries followed by "S". ... (check-in: 2ec8a7ae38 user: drh tags: trunk)
18:57  Bug fix in the previous check-in for the "--tk" option on the "3-way-merge" command. ... (check-in: d6fa91fdc8 user: drh tags: trunk)

Changes to src/blob.c.

Old version (lines 663-677):
void blob_dehttpize(Blob *pBlob){
  blob_materialize(pBlob);
  pBlob->nUsed = dehttpize(pBlob->aData);
}

/*
** Extract N bytes from blob pFrom and use it to initialize blob pTo.
** Return the actual number of bytes extracted.

**
** After this call completes, pTo will be an ephemeral blob.
*/
int blob_extract(Blob *pFrom, int N, Blob *pTo){
  blob_is_init(pFrom);
  assert_blob_is_reset(pTo);
  if( pFrom->iCursor + N > pFrom->nUsed ){

New version (lines 663-678):
void blob_dehttpize(Blob *pBlob){
  blob_materialize(pBlob);
  pBlob->nUsed = dehttpize(pBlob->aData);
}

/*
** Extract N bytes from blob pFrom and use it to initialize blob pTo.
** Return the actual number of bytes extracted.  The cursor position
** is advanced by the number of bytes extracted.
**
** After this call completes, pTo will be an ephemeral blob.
*/
int blob_extract(Blob *pFrom, int N, Blob *pTo){
  blob_is_init(pFrom);
  assert_blob_is_reset(pTo);
  if( pFrom->iCursor + N > pFrom->nUsed ){

Old version (lines 685-698):
  pTo->nAlloc = N;
  pTo->aData = &pFrom->aData[pFrom->iCursor];
  pTo->iCursor = 0;
  pTo->xRealloc = blobReallocStatic;
  pFrom->iCursor += N;
  return N;
}

/*
** Rewind the cursor on a blob back to the beginning.
*/
void blob_rewind(Blob *p){
  p->iCursor = 0;
}

New version (lines 686-746):
  pTo->nAlloc = N;
  pTo->aData = &pFrom->aData[pFrom->iCursor];
  pTo->iCursor = 0;
  pTo->xRealloc = blobReallocStatic;
  pFrom->iCursor += N;
  return N;
}

/*
** Extract N **lines** of text from blob pFrom beginning at the current
** cursor position and use that text to initialize blob pTo.  Unlike the
** blob_extract() routine,  the cursor position is unchanged.
**
** pTo is assumed to be uninitialized.
**
** After this call completes, pTo will be an ephemeral blob.
*/
int blob_extract_lines(Blob *pFrom, int N, Blob *pTo){
  int i;
  int mx;
  int iStart;
  int n;
  const char *z;

  blob_zero(pTo);
  z = pFrom->aData;
  i = pFrom->iCursor;
  mx = pFrom->nUsed;
  while( N>0 ){
    while( i<mx && z[i]!='\n' ){ i++; }
    if( i>=mx ) break;
    i++;
    N--;
  }
  iStart = pFrom->iCursor;
  n = blob_extract(pFrom, i-pFrom->iCursor, pTo);
  pFrom->iCursor = iStart;
  return n;
}

/*
** Return the number of lines of text in the blob.  If the last
** line is incomplete (if it does not have a \n at the end) then
** it still counts.
*/
int blob_linecount(Blob *p){
  int n = 0;
  int i;
  for(i=0; i<p->nUsed; i++){
    if( p->aData[i]=='\n' ) n++;
  }
  if( p->nUsed>0 && p->aData[p->nUsed-1]!='\n' ) n++;
  return n;
}

/*
** Rewind the cursor on a blob back to the beginning.
*/
void blob_rewind(Blob *p){
  p->iCursor = 0;
}
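
The header comment on blob_linecount() above says that an unterminated final line still counts as a line. A minimal standalone sketch of that same counting rule, written without Fossil's Blob type (the name line_count and the test strings are only illustrative):

#include <assert.h>
#include <string.h>

/* Count lines the way blob_linecount() does: one per '\n', plus one
** more if the buffer is non-empty and does not end with '\n'. */
static int line_count(const char *z, int n){
  int i, cnt = 0;
  for(i=0; i<n; i++){
    if( z[i]=='\n' ) cnt++;
  }
  if( n>0 && z[n-1]!='\n' ) cnt++;
  return cnt;
}

int main(void){
  assert( line_count("", 0)==0 );
  assert( line_count("one\ntwo\n", 8)==2 );
  assert( line_count("one\ntwo", 7)==2 );  /* unterminated last line still counts */
  return 0;
}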

Changes to src/diff.c.

Old version (lines 48-61):
#define DIFF_JSON              0x00010000 /* JSON output */
#define DIFF_DEBUG             0x00020000 /* Debugging diff output */
#define DIFF_RAW               0x00040000 /* Raw triples - for debugging */
#define DIFF_TCL               0x00080000 /* For the --tk option */
#define DIFF_INCBINARY         0x00100000 /* The --diff-binary option */
#define DIFF_SHOW_VERS         0x00200000 /* Show compared versions */
#define DIFF_DARKMODE          0x00400000 /* Use dark mode for HTML */


/*
** Per file information that may influence output.
*/
#define DIFF_FILE_ADDED        0x40000000 /* Added or rename destination */
#define DIFF_FILE_DELETED      0x80000000 /* Deleted or rename source */
#define DIFF_FILE_MASK         0xc0000000 /* Used for clearing file flags */

New version (lines 48-62):
#define DIFF_JSON              0x00010000 /* JSON output */
#define DIFF_DEBUG             0x00020000 /* Debugging diff output */
#define DIFF_RAW               0x00040000 /* Raw triples - for debugging */
#define DIFF_TCL               0x00080000 /* For the --tk option */
#define DIFF_INCBINARY         0x00100000 /* The --diff-binary option */
#define DIFF_SHOW_VERS         0x00200000 /* Show compared versions */
#define DIFF_DARKMODE          0x00400000 /* Use dark mode for HTML */
#define DIFF_BY_TOKEN          0x01000000 /* Split on tokens, not lines */

/*
** Per file information that may influence output.
*/
#define DIFF_FILE_ADDED        0x40000000 /* Added or rename destination */
#define DIFF_FILE_DELETED      0x80000000 /* Deleted or rename source */
#define DIFF_FILE_MASK         0xc0000000 /* Used for clearing file flags */

Old version (lines 317-330):
  }while( zNL[0]!='\0' && zNL[1]!='\0' );
  assert( i==nLine );

  /* Return results */
  *pnLine = nLine;
  return a;
}

/*
** Return zero if two DLine elements are identical.
*/
static int compare_dline(const DLine *pA, const DLine *pB){
  if( pA->h!=pB->h ) return 1;
  return memcmp(pA->z,pB->z, pA->h&LENGTH_MASK);

New version (lines 318-434):
  }while( zNL[0]!='\0' && zNL[1]!='\0' );
  assert( i==nLine );

  /* Return results */
  *pnLine = nLine;
  return a;
}

/*
** Character classes for the purpose of tokenization.
**
**    1 - alphanumeric
**    2 - whitespace
**    3 - punctuation
*/
static char aTCharClass[256] = {
  2, 2, 2, 2,  2, 2, 2, 2,   2, 2, 2, 2,  2, 2, 2, 2, 
  2, 2, 2, 2,  2, 2, 2, 2,   2, 2, 2, 2,  2, 2, 2, 2, 
  2, 3, 3, 3,  3, 3, 3, 3,   3, 3, 3, 3,  3, 3, 3, 3,
  1, 1, 1, 1,  1, 1, 1, 1,   1, 3, 3, 3,  3, 3, 3, 3,

  3, 1, 1, 1,  1, 1, 1, 1,   1, 1, 1, 1,  1, 1, 1, 1,
  1, 1, 1, 1,  1, 1, 1, 1,   1, 1, 1, 3,  3, 3, 3, 3,
  3, 1, 1, 1,  1, 1, 1, 1,   1, 1, 1, 1,  1, 1, 1, 1,
  1, 1, 1, 1,  1, 1, 1, 1,   1, 1, 1, 3,  3, 3, 3, 3,

  1, 1, 1, 1,  1, 1, 1, 1,   1, 1, 1, 1,  1, 1, 1, 1,
  1, 1, 1, 1,  1, 1, 1, 1,   1, 1, 1, 1,  1, 1, 1, 1,
  1, 1, 1, 1,  1, 1, 1, 1,   1, 1, 1, 1,  1, 1, 1, 1,
  1, 1, 1, 1,  1, 1, 1, 1,   1, 1, 1, 1,  1, 1, 1, 1,

  1, 1, 1, 1,  1, 1, 1, 1,   1, 1, 1, 1,  1, 1, 1, 1,
  1, 1, 1, 1,  1, 1, 1, 1,   1, 1, 1, 1,  1, 1, 1, 1,
  1, 1, 1, 1,  1, 1, 1, 1,   1, 1, 1, 1,  1, 1, 1, 1,
  1, 1, 1, 1,  1, 1, 1, 1,   1, 1, 1, 1,  1, 1, 1, 1
};

/*
** Count the number of tokens in the given string.
*/
static int count_tokens(const unsigned char *p, int n){
  int nToken = 0;
  int iPrev = 0;
  int i;
  for(i=0; i<n; i++){
    char x = aTCharClass[p[i]];
    if( x!=iPrev ){
      iPrev = x;
      nToken++;
    }
  }
  return nToken;
}

/*
** Return an array of DLine objects containing a pointer to the
** start of each token and a hash of that token.  The lower
** bits of the hash store the length of each token.
**
** This is like break_into_lines() except that it works with tokens
** instead of lines.  A token is:
**
**     *  A contiguous sequence of alphanumeric characters.
**     *  A contiguous sequence of whitespace
**     *  A contiguous sequence of punctuation characters.
**
** Return 0 if the file is binary or contains a line that is
** too long.
*/
static DLine *break_into_tokens(
  const char *z,
  int n,
  int *pnToken,
  u64 diffFlags
){
  int nToken, i, k;
  u64 h, h2;
  DLine *a;
  unsigned char *p = (unsigned char*)z;

  nToken = count_tokens(p, n);
  a = fossil_malloc( sizeof(a[0])*(nToken+1) );
  memset(a, 0, sizeof(a[0])*(nToken+1));
  if( n==0 ){
    *pnToken = 0;
    return a;
  }
  i = 0;
  while( n>0 ){
    char x = aTCharClass[*p];
    h = 0xcbf29ce484222325LL;
    for(k=1; k<n && aTCharClass[p[k]]==x; k++){
      h ^= p[k];
      h *= 0x100000001b3LL;
    }
    a[i].z = (char*)p;
    a[i].n = k;
    a[i].h = h = ((h%281474976710597LL)<<LENGTH_MASK_SZ) | k;
    h2 = h % nToken;
    a[i].iNext = a[h2].iHash;
    a[h2].iHash = i+1;
    p += k; n -= k;
    i++;
  };
  assert( i==nToken );

  /* Return results */
  *pnToken = nToken;
  return a;
}

/*
** Return zero if two DLine elements are identical.
*/
static int compare_dline(const DLine *pA, const DLine *pB){
  if( pA->h!=pB->h ) return 1;
  return memcmp(pA->z,pB->z, pA->h&LENGTH_MASK);
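
The break_into_tokens() comment above defines a token as a contiguous run of alphanumeric characters, of whitespace, or of punctuation. A simplified standalone sketch of that splitting rule, using <ctype.h> classes instead of Fossil's aTCharClass table (the name split_tokens and the sample string are only illustrative):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* 1 = alphanumeric, 2 = whitespace, 3 = punctuation (everything else) */
static int char_class(unsigned char c){
  if( isalnum(c) ) return 1;
  if( isspace(c) ) return 2;
  return 3;
}

/* Print each maximal run of same-class characters as one token. */
static void split_tokens(const char *z){
  int i = 0, n = (int)strlen(z);
  while( i<n ){
    int cls = char_class((unsigned char)z[i]);
    int k = i+1;
    while( k<n && char_class((unsigned char)z[k])==cls ) k++;
    printf("[%.*s]", k-i, z+i);
    i = k;
  }
  printf("\n");
}

int main(void){
  split_tokens("x = y+1;");   /* prints [x][ ][=][ ][y][+][1][;] */
  return 0;
}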

Old version (lines 2995-3012):
  /* Prepare the input files */
  memset(&c, 0, sizeof(c));
  if( (pCfg->diffFlags & DIFF_IGNORE_ALLWS)==DIFF_IGNORE_ALLWS ){
    c.xDiffer = compare_dline_ignore_allws;
  }else{
    c.xDiffer = compare_dline;
  }






  c.aFrom = break_into_lines(blob_str(pA_Blob), blob_size(pA_Blob),
                             &c.nFrom, pCfg->diffFlags);
  c.aTo = break_into_lines(blob_str(pB_Blob), blob_size(pB_Blob),
                           &c.nTo, pCfg->diffFlags);

  if( c.aFrom==0 || c.aTo==0 ){
    fossil_free(c.aFrom);
    fossil_free(c.aTo);
    if( pOut ){
      diff_errmsg(pOut, DIFF_CANNOT_COMPUTE_BINARY, pCfg->diffFlags);
    }
    return 0;

New version (lines 3099-3123):
  /* Prepare the input files */
  memset(&c, 0, sizeof(c));
  if( (pCfg->diffFlags & DIFF_IGNORE_ALLWS)==DIFF_IGNORE_ALLWS ){
    c.xDiffer = compare_dline_ignore_allws;
  }else{
    c.xDiffer = compare_dline;
  }
  if( pCfg->diffFlags & DIFF_BY_TOKEN ){
    c.aFrom = break_into_tokens(blob_str(pA_Blob), blob_size(pA_Blob),
                               &c.nFrom, pCfg->diffFlags);
    c.aTo = break_into_tokens(blob_str(pB_Blob), blob_size(pB_Blob),
                             &c.nTo, pCfg->diffFlags);
  }else{
    c.aFrom = break_into_lines(blob_str(pA_Blob), blob_size(pA_Blob),
                               &c.nFrom, pCfg->diffFlags);
    c.aTo = break_into_lines(blob_str(pB_Blob), blob_size(pB_Blob),
                             &c.nTo, pCfg->diffFlags);
  }
  if( c.aFrom==0 || c.aTo==0 ){
    fossil_free(c.aFrom);
    fossil_free(c.aTo);
    if( pOut ){
      diff_errmsg(pOut, DIFF_CANNOT_COMPUTE_BINARY, pCfg->diffFlags);
    }
    return 0;

Old version (lines 3033-3061):
      if( pOut ) diff_errmsg(pOut, DIFF_TOO_MANY_CHANGES, pCfg->diffFlags);
      return 0;
    }
  }
  if( (pCfg->diffFlags & DIFF_NOOPT)==0 ){
    diff_optimize(&c);
  }

  if( pOut ){
    if( pCfg->diffFlags & DIFF_NUMSTAT ){
      int nDel = 0, nIns = 0, i;
      for(i=0; c.aEdit[i] || c.aEdit[i+1] || c.aEdit[i+2]; i+=3){
        nDel += c.aEdit[i+1];
        nIns += c.aEdit[i+2];
      }
      g.diffCnt[1] += nIns;
      g.diffCnt[2] += nDel;
      if( nIns+nDel ){
        g.diffCnt[0]++;
        blob_appendf(pOut, "%10d %10d", nIns, nDel);
      }
    }else if( pCfg->diffFlags & DIFF_RAW ){
      const int *R = c.aEdit;
      unsigned int r;
      for(r=0; R[r] || R[r+1] || R[r+2]; r += 3){
        blob_appendf(pOut, " copy %6d  delete %6d  insert %6d\n",
                     R[r], R[r+1], R[r+2]);
      }
    }else if( pCfg->diffFlags & DIFF_JSON ){

New version (lines 3144-3188):
      if( pOut ) diff_errmsg(pOut, DIFF_TOO_MANY_CHANGES, pCfg->diffFlags);
      return 0;
    }
  }
  if( (pCfg->diffFlags & DIFF_NOOPT)==0 ){
    diff_optimize(&c);
  }
  if( (pCfg->diffFlags & DIFF_BY_TOKEN)!=0 ){
    /* Convert token counts into byte counts. */
    int i;
    int iA = 0;
    int iB = 0;
    for(i=0; c.aEdit[i] || c.aEdit[i+1] || c.aEdit[i+2]; i+=3){
      int k, sum;
      for(k=0, sum=0; k<c.aEdit[i]; k++) sum += c.aFrom[iA++].n;
      iB += c.aEdit[i];
      c.aEdit[i] = sum;
      for(k=0, sum=0; k<c.aEdit[i+1]; k++) sum += c.aFrom[iA++].n;
      c.aEdit[i+1] = sum;
      for(k=0, sum=0; k<c.aEdit[i+2]; k++) sum += c.aTo[iB++].n;
      c.aEdit[i+2] = sum;
    }
  }

  if( pOut ){
    if( pCfg->diffFlags & DIFF_NUMSTAT ){
      int nDel = 0, nIns = 0, i;
      for(i=0; c.aEdit[i] || c.aEdit[i+1] || c.aEdit[i+2]; i+=3){
        nDel += c.aEdit[i+1];
        nIns += c.aEdit[i+2];
      }
      g.diffCnt[1] += nIns;
      g.diffCnt[2] += nDel;
      if( nIns+nDel ){
        g.diffCnt[0]++;
        blob_appendf(pOut, "%10d %10d", nIns, nDel);
      }
    }else if( pCfg->diffFlags & (DIFF_RAW|DIFF_BY_TOKEN) ){
      const int *R = c.aEdit;
      unsigned int r;
      for(r=0; R[r] || R[r+1] || R[r+2]; r += 3){
        blob_appendf(pOut, " copy %6d  delete %6d  insert %6d\n",
                     R[r], R[r+1], R[r+2]);
      }
    }else if( pCfg->diffFlags & DIFF_JSON ){

Old version (lines 3155-3168):
      diffFlags |= DIFF_TCL;
    }

    /* Undocumented and unsupported flags used for development
    ** debugging and analysis: */
    if( find_option("debug",0,0)!=0 ) diffFlags |= DIFF_DEBUG;
    if( find_option("raw",0,0)!=0 )   diffFlags |= DIFF_RAW;



  }
  if( (z = find_option("context","c",1))!=0 ){
    char *zEnd;
    f = (int)strtol(z, &zEnd, 10);
    if( zEnd[0]==0 && errno!=ERANGE ){
      pCfg->nContext = f;
      diffFlags |= DIFF_CONTEXT_EX;

New version (lines 3282-3298):
      diffFlags |= DIFF_TCL;
    }

    /* Undocumented and unsupported flags used for development
    ** debugging and analysis: */
    if( find_option("debug",0,0)!=0 ) diffFlags |= DIFF_DEBUG;
    if( find_option("raw",0,0)!=0 )   diffFlags |= DIFF_RAW;
    if( find_option("bytoken",0,0)!=0 ){
      diffFlags = DIFF_RAW|DIFF_BY_TOKEN;
    }
  }
  if( (z = find_option("context","c",1))!=0 ){
    char *zEnd;
    f = (int)strtol(z, &zEnd, 10);
    if( zEnd[0]==0 && errno!=ERANGE ){
      pCfg->nContext = f;
      diffFlags |= DIFF_CONTEXT_EX;

Changes to src/merge.c.

Old version (lines 163-221):
  }else{
    mb.zPivot = mprintf("%s (baseline)", file_tail(zFN));
    rid = db_column_int(&q, 1);
    content_get(rid, &pivot);
  }
  mb.pPivot = &pivot;

  /* Set up the merge-in as V1 */
  zFN = db_column_text(&q, 5);
  if( zFN==0 ){
    /* File deleted in the merged-in branch */
    mb.zV1 = "(deleted file)";
    blob_zero(&v1);
  }else{
    mb.zV1 = mprintf("%s (merge-in)", file_tail(zFN));
    rid = db_column_int(&q, 6);
    content_get(rid, &v1);
  }
  mb.pV1 = &v1;

  /* Set up the local content as V2 */
  zFN = db_column_text(&q, 2);
  if( zFN==0 ){
    /* File added by merge */
    mb.zV2 = "(no original)";
    blob_zero(&v2);
  }else{
    mb.zV2 = mprintf("%s (local)", file_tail(zFN));
    rid = db_column_int(&q, 3);
    sz = db_column_int(&q, 4);
    if( rid==0 && sz>0 ){
      /* The origin file had been edited so we'll have to pull its
      ** original content out of the undo buffer */
      Stmt q2;
      db_prepare(&q2, 
        "SELECT content FROM undo"
        " WHERE pathname=%Q AND octet_length(content)=%d",
        zFN, sz
      );
      blob_zero(&v2);
      if( db_step(&q2)==SQLITE_ROW ){
        db_column_blob(&q, 0, &v2);
      }else{
        mb.zV2 = "(local content missing)";
      }
      db_finalize(&q2);
    }else{
      /* The origin file was unchanged when the merge first occurred */
      content_get(rid, &v2);
    }
  }
  mb.pV2 = &v2;

  /* Set up the output */
  zFN = db_column_text(&q, 7);
  if( zFN==0 ){
    mb.zOut = "(Merge Result)";
  }else{
    mb.zOut = mprintf("%s (after merge)", file_tail(zFN));

New version (lines 163-221):
  }else{
    mb.zPivot = mprintf("%s (baseline)", file_tail(zFN));
    rid = db_column_int(&q, 1);
    content_get(rid, &pivot);
  }
  mb.pPivot = &pivot;

  /* Set up the merge-in as V2 */
  zFN = db_column_text(&q, 5);
  if( zFN==0 ){
    /* File deleted in the merged-in branch */
    mb.zV2 = "(deleted file)";
    blob_zero(&v2);
  }else{
    mb.zV2 = mprintf("%s (merge-in)", file_tail(zFN));
    rid = db_column_int(&q, 6);
    content_get(rid, &v2);
  }
  mb.pV2 = &v2;

  /* Set up the local content as V1 */
  zFN = db_column_text(&q, 2);
  if( zFN==0 ){
    /* File added by merge */
    mb.zV1 = "(no original)";
    blob_zero(&v1);
  }else{
    mb.zV1 = mprintf("%s (local)", file_tail(zFN));
    rid = db_column_int(&q, 3);
    sz = db_column_int(&q, 4);
    if( rid==0 && sz>0 ){
      /* The origin file had been edited so we'll have to pull its
      ** original content out of the undo buffer */
      Stmt q2;
      db_prepare(&q2, 
        "SELECT content FROM undo"
        " WHERE pathname=%Q AND octet_length(content)=%d",
        zFN, sz
      );
      blob_zero(&v1);
      if( db_step(&q2)==SQLITE_ROW ){
        db_column_blob(&q, 0, &v1);
      }else{
        mb.zV1 = "(local content missing)";
      }
      db_finalize(&q2);
    }else{
      /* The origin file was unchanged when the merge first occurred */
      content_get(rid, &v1);
    }
  }
  mb.pV1 = &v1;

  /* Set up the output */
  zFN = db_column_text(&q, 7);
  if( zFN==0 ){
    mb.zOut = "(Merge Result)";
  }else{
    mb.zOut = mprintf("%s (after merge)", file_tail(zFN));

Changes to src/merge.tcl.

Old version (lines 98-112):
  global fossilcmd ncontext current_file
  if {$ncontext=="All"} {
    set cmd "$fossilcmd -c -1"
  } else {
    set cmd "$fossilcmd -c $ncontext"
  }
  if {[info exists current_file]} {

    append cmd " -tcl [list $current_file]"
  }
  if {[catch {
    set in [open $cmd r]
    fconfigure $in -encoding utf-8
    set mergetxt [read $in]
    close $in
  } msg]} {

New version (lines 98-113):
  global fossilcmd ncontext current_file
  if {$ncontext=="All"} {
    set cmd "$fossilcmd -c -1"
  } else {
    set cmd "$fossilcmd -c $ncontext"
  }
  if {[info exists current_file]} {
    regsub {^[A-Z]+ } $current_file {} fn
    append cmd " -tcl [list $fn]"
  }
  if {[catch {
    set in [open $cmd r]
    fconfigure $in -encoding utf-8
    set mergetxt [read $in]
    close $in
  } msg]} {

Old version (lines 120-220):
  set lnA 1
  set lnB 1
  set lnC 1
  set lnD 1
  foreach {A B C D} $mergetxt {
    set key1 [string index $A 0]
    if {$key1=="S"} {
      set N [string range $A 1 end]
      incr lnA $N
      incr lnB $N
      incr lnC $N
      incr lnD $N
      .lnA insert end ...\n hrln
      .txtA insert end [string repeat . 30]\n hrtxt
      .lnB insert end ...\n hrln
      .txtB insert end [string repeat . 30]\n hrtxt
      .lnC insert end ...\n hrln
      .txtC insert end [string repeat . 30]\n hrtxt
      .lnD insert end ...\n hrln
      .txtD insert end [string repeat . 30]\n hrtxt


      continue
    }
    set key2 [string index $B 0]
    set key3 [string index $C 0]
    set key4 [string index $D 0]
    if {$key4=="X"} {set dtag rm} {set dtag -}
    if {$key1=="."} {
      .lnA insert end \n -
      .txtA insert end \n $dtag
    } elseif {$key1=="N"} {
      .nameA config -text [string range $A 1 end]
    } else {
      .lnA insert end $lnA\n -
      incr lnA

      .txtA insert end [string range $A 1 end]\n $dtag



    }
    if {$key2=="."} {
      .lnB insert end \n -
      .txtB insert end \n $dtag
    } elseif {$key2=="N"} {
      .nameB config -text [string range $B 1 end]
    } else {
      .lnB insert end $lnB\n -
      incr lnB
      if {$key4=="2"} {set tag chng} {set tag $dtag}
      if {$key2=="1"} {
        .txtB insert end [string range $A 1 end]\n $tag


      } else {
        .txtB insert end [string range $B 1 end]\n $tag
      }
    }
    if {$key3=="."} {
      .lnC insert end \n -
      .txtC insert end \n $dtag
   } elseif {$key3=="N"} {
      .nameC config -text [string range $C 1 end]
    } else {
      .lnC insert end $lnC\n -
      incr lnC
      if {$key4=="3"} {set tag add} {set tag $dtag}
      if {$key3=="1"} {
        .txtC insert end [string range $A 1 end]\n $tag
      } elseif {$key3=="2"} {
        .txtC insert end [string range $B 1 end]\n chng


       } else {
        .txtC insert end [string range $C 1 end]\n $tag
      }
    }
    if {$key4=="." || $key4=="X"} {
      .lnD insert end \n -
      .txtD insert end \n $dtag
    } elseif {$key4=="N"} {
      .nameD config -text [string range $D 1 end]
    } else {
      .lnD insert end $lnD\n -
      incr lnD
      if {$key4=="1"} {
        .txtD insert end [string range $A 1 end]\n -
      } elseif {$key4=="2"} {
        .txtD insert end [string range $B 1 end]\n chng
      } elseif {$key4=="3"} {
        .txtD insert end [string range $C 1 end]\n add


      } else {
        .txtD insert end [string range $D 1 end]\n -
      }
    }
  }
  foreach c [cols] {
    set type [colType $c]
    if {$type ne "txt"} {
      $c config -width 6; # $widths($type)
    }
    $c config -state disabled
  }
}

proc viewDiff {idx} {
  .txtA yview $idx
  .txtA xview moveto 0
}

New version (lines 121-240):
  set lnA 1
  set lnB 1
  set lnC 1
  set lnD 1
  foreach {A B C D} $mergetxt {
    set key1 [string index $A 0]
    if {$key1=="S"} {
      scan [string range $A 1 end] "%d %d %d %d" nA nB nC nD
      foreach x {A B C D} {
        set N [set n$x]
        incr ln$x $N
        if {$N>0} {
          .ln$x insert end ...\n hrln
          .txt$x insert end [string repeat . 30]\n hrtxt
        } else {

          .ln$x insert end \n hrln

          .txt$x insert end \n hrtxt

        }
      }
      continue
    }
    set key2 [string index $B 0]
    set key3 [string index $C 0]
    set key4 [string index $D 0]

    if {$key1=="."} {
      .lnA insert end \n -
      .txtA insert end \n -
    } elseif {$key1=="N"} {
      .nameA config -text [string range $A 1 end]
    } else {
      .lnA insert end $lnA\n -
      incr lnA
      if {$key1=="X"} {
        .txtA insert end [string range $A 1 end]\n rm
      } else {
        .txtA insert end [string range $A 1 end]\n -
      }
    }
    if {$key2=="."} {
      .lnB insert end \n -
      .txtB insert end \n -
    } elseif {$key2=="N"} {
      .nameB config -text [string range $B 1 end]
    } else {
      .lnB insert end $lnB\n -
      incr lnB
      if {$key4=="2"} {set tag chng} {set tag -}
      if {$key2=="1"} {
        .txtB insert end [string range $A 1 end]\n $tag
      } elseif {$key2=="X"} {
        .txtB insert end [string range $B 1 end]\n rm
      } else {
        .txtB insert end [string range $B 1 end]\n $tag
      }
    }
    if {$key3=="."} {
      .lnC insert end \n -
      .txtC insert end \n -
    } elseif {$key3=="N"} {
      .nameC config -text [string range $C 1 end]
    } else {
      .lnC insert end $lnC\n -
      incr lnC
      if {$key4=="3"} {set tag add} {set tag -}
      if {$key3=="1"} {
        .txtC insert end [string range $A 1 end]\n $tag
      } elseif {$key3=="2"} {
        .txtC insert end [string range $B 1 end]\n chng
      } elseif {$key3=="X"} {
        .txtC insert end [string range $C 1 end]\n rm
      } else {
        .txtC insert end [string range $C 1 end]\n $tag
      }
    }
    if {$key4=="."} {
      .lnD insert end \n -
      .txtD insert end \n -
    } elseif {$key4=="N"} {
      .nameD config -text [string range $D 1 end]
    } else {
      .lnD insert end $lnD\n -
      incr lnD
      if {$key4=="1"} {
        .txtD insert end [string range $A 1 end]\n -
      } elseif {$key4=="2"} {
        .txtD insert end [string range $B 1 end]\n chng
      } elseif {$key4=="3"} {
        .txtD insert end [string range $C 1 end]\n add
      } elseif {$key4=="X"} {
        .txtD insert end [string range $D 1 end]\n rm
      } else {
        .txtD insert end [string range $D 1 end]\n -
      }
    }
  }
  foreach c [cols] {
    set type [colType $c]
    if {$type ne "txt"} {
      $c config -width 6; # $widths($type)
    }
    $c config -state disabled
  }
  set mx $lnA
  if {$lnB>$mx} {set mx $lnB}
  if {$lnC>$mx} {set mx $lnC}
  if {$lnD>$mx} {set mx $lnD}
  global lnWidth
  set lnWidth [string length [format %d $mx]]
  .lnA config -width $lnWidth
  .lnB config -width $lnWidth
  .lnC config -width $lnWidth
  .lnD config -width $lnWidth
  grid columnconfig . {0 2 4 6} -minsize $lnWidth
}

proc viewDiff {idx} {
  .txtA yview $idx
  .txtA xview moveto 0
}

Old version (lines 257-280):
}

proc disableSync {axis} {
  rename sync-$axis _sync-$axis
  interp alias {} sync-$axis {} noop
}

proc sync-x {col first last} {
  disableSync x
  $col xview moveto [expr {$first*[xvis $col]/($last-$first)}]
  foreach side {A B C D} {
    set sb .sbx$side
    set xview [.txt$side xview]
  }
  enableSync x
}

proc sync-y {first last} {
  disableSync y
  foreach c [cols] {
    $c yview moveto $first
  }
  if {$first > 0 || $last < 1} {
    grid .sby

New version (lines 277-290):
}

proc disableSync {axis} {
  rename sync-$axis _sync-$axis
  interp alias {} sync-$axis {} noop
}











proc sync-y {first last} {
  disableSync y
  foreach c [cols] {
    $c yview moveto $first
  }
  if {$first > 0 || $last < 1} {
    grid .sby

Old version (lines 325-436):
  End   y {moveto 1}
} {
  bind . <$key> "scroll-$axis $args; break"
  bind . <Shift-$key> continue
}

frame .bb

if {[info exists filelist]} {
  label .bb.filetag -text "File:"
  set current_file [lindex $filelist 1]

  trace add variable current_file write readMerge







  ::ttk::menubutton .bb.files -text $current_file
  if {[tk windowingsystem] eq "win32"} {
    ::ttk::style theme use winnative
    .bb.files configure -padding {20 1 10 2}
  }
  toplevel .wfiles
  wm withdraw .wfiles
  update idletasks
  wm transient .wfiles .
  wm overrideredirect .wfiles 1
  set ht [expr {[llength $filelist]/2}]
  if {$ht>$CFG(LB_HEIGHT)} {set ht $CFG(LB_HEIGHT)}
  listbox .wfiles.lb -width 0 -height $ht -activestyle none \
    -yscroll {.wfiles.sb set}
  set mx 1
  foreach {op fn} $filelist {
    set n [string length $fn]
    if {$n>$mx} {set mx $n}
    .wfiles.lb insert end [format "%-9s %s" $op $fn]
  }
  .bb.files config -width $mx
  ::ttk::scrollbar .wfiles.sb -command {.wfiles.lb yview}
  grid .wfiles.lb .wfiles.sb -sticky ns
  bind .bb.files <1> {
    set x [winfo rootx %W]
    set y [expr {[winfo rooty %W]+[winfo height %W]}]
    wm geometry .wfiles +$x+$y
    wm deiconify .wfiles
    focus .wfiles.lb
  }
  bind .wfiles <FocusOut> {wm withdraw .wfiles}
  bind .wfiles <Escape> {focus .}
  foreach evt {1 Return} {
    bind .wfiles.lb <$evt> {
      set ii [%W curselection]
      set ::current_file [lindex $::filelist [expr {$ii*2+1}]]
      .bb.files config -text $::current_file
      focus .
      break
    }
  }
  bind .wfiles.lb <Motion> {
    %W selection clear 0 end
    %W selection set @%x,%y


  }
}

label .bb.ctxtag -text "Context:"
set context_choices {3 6 12 25 50 100 All}
if {$ncontext<0} {set ncontext All}
trace add variable ncontext write readMerge



::ttk::menubutton .bb.ctx -text $ncontext
if {[tk windowingsystem] eq "win32"} {
  ::ttk::style theme use winnative
  .bb.ctx configure -padding {20 1 10 2}
}
toplevel .wctx
wm withdraw .wctx
update idletasks
wm transient .wctx .
wm overrideredirect .wctx 1
listbox .wctx.lb -width 0 -height 7 -activestyle none
.wctx.lb insert end {*}$context_choices
pack .wctx.lb
bind .bb.ctx <1> {
  set x [winfo rootx %W]
  set y [expr {[winfo rooty %W]+[winfo height %W]}]
  wm geometry .wctx +$x+$y
  wm deiconify .wctx
  focus .wctx.lb
}
bind .wctx <FocusOut> {wm withdraw .wctx}
bind .wctx <Escape> {focus .}
foreach evt {1 Return} {
  bind .wctx.lb <$evt> {
    set ::ncontext [lindex $::context_choices [%W curselection]]
    .bb.ctx config -text $::ncontext
    focus .
    break
  }
}
bind .wctx.lb <Motion> {
  %W selection clear 0 end
  %W selection set @%x,%y

}

foreach {side syncCol} {A .txtB B .txtA C .txtC D .txtD} {
  set ln .ln$side
  text $ln
  $ln tag config - -justify right

  set txt .txt$side
  text $txt -width $CFG(WIDTH) -height $CFG(HEIGHT) -wrap none \
    -xscroll "sync-x $syncCol"
  catch {$txt config -tabstyle wordprocessor} ;# Required for Tk>=8.5
  foreach tag {add rm chng} {
    $txt tag config $tag -background $CFG([string toupper $tag]_BG)
    $txt tag lower $tag
  }
  $txt tag config fn -background $CFG(FN_BG) -foreground $CFG(FN_FG) \
    -justify center


New version (lines 335-460):
  End   y {moveto 1}
} {
  bind . <$key> "scroll-$axis $args; break"
  bind . <Shift-$key> continue
}

frame .bb
set useOptionMenu 1
if {[info exists filelist]} {

  set current_file "[lindex $filelist 0] [lindex $filelist 1]"
  if {[llength $filelist]>2} {
    trace add variable current_file write readMerge
  
    if {$tcl_platform(os)=="Darwin" || [llength $filelist]<30} {
      set fnlist {}
      foreach {op fn} $filelist {lappend fnlist "$op $fn"}
      tk_optionMenu .bb.files current_file {*}$fnlist
    } else {
      set useOptionMenu 0
      ::ttk::menubutton .bb.files -text $current_file
      if {[tk windowingsystem] eq "win32"} {
        ::ttk::style theme use winnative
        .bb.files configure -padding {20 1 10 2}
      }
      toplevel .wfiles
      wm withdraw .wfiles
      update idletasks
      wm transient .wfiles .
      wm overrideredirect .wfiles 1
      set ht [expr {[llength $filelist]/2}]
      if {$ht>$CFG(LB_HEIGHT)} {set ht $CFG(LB_HEIGHT)}
      listbox .wfiles.lb -width 0 -height $ht -activestyle none \
        -yscroll {.wfiles.sb set}
      set mx 1
      foreach {op fn} $filelist {
        set n [string length $fn]
        if {$n>$mx} {set mx $n}
        .wfiles.lb insert end "$op $fn"
      }
      .bb.files config -width $mx
      ::ttk::scrollbar .wfiles.sb -command {.wfiles.lb yview}
      grid .wfiles.lb .wfiles.sb -sticky ns
      bind .bb.files <1> {
        set x [winfo rootx %W]
        set y [expr {[winfo rooty %W]+[winfo height %W]}]
        wm geometry .wfiles +$x+$y
        wm deiconify .wfiles
        focus .wfiles.lb
      }
      bind .wfiles <FocusOut> {wm withdraw .wfiles}
      bind .wfiles <Escape> {focus .}
      foreach evt {1 Return} {
        bind .wfiles.lb <$evt> {
          set ii [%W curselection]
          set ::current_file [%W get $ii]
          .bb.files config -text $::current_file
          focus .
          break
        }
      }
      bind .wfiles.lb <Motion> {
        %W selection clear 0 end
        %W selection set @%x,%y
      }
    }
  }
}

label .bb.ctxtag -text "Context:"
set context_choices {3 6 12 25 50 100 All}
if {$ncontext<0} {set ncontext All}
trace add variable ncontext write readMerge
if {$tcl_platform(os)=="Darwin" || $useOptionMenu} {
  tk_optionMenu .bb.ctx ncontext {*}$context_choices
} else {
  ::ttk::menubutton .bb.ctx -text $ncontext
  if {[tk windowingsystem] eq "win32"} {
    ::ttk::style theme use winnative
    .bb.ctx configure -padding {20 1 10 2}
  }
  toplevel .wctx
  wm withdraw .wctx
  update idletasks
  wm transient .wctx .
  wm overrideredirect .wctx 1
  listbox .wctx.lb -width 0 -height 7 -activestyle none
  .wctx.lb insert end {*}$context_choices
  pack .wctx.lb
  bind .bb.ctx <1> {
    set x [winfo rootx %W]
    set y [expr {[winfo rooty %W]+[winfo height %W]}]
    wm geometry .wctx +$x+$y
    wm deiconify .wctx
    focus .wctx.lb
  }
  bind .wctx <FocusOut> {wm withdraw .wctx}
  bind .wctx <Escape> {focus .}
  foreach evt {1 Return} {
    bind .wctx.lb <$evt> {
      set ::ncontext [lindex $::context_choices [%W curselection]]
      .bb.ctx config -text $::ncontext
      focus .
      break
    }
  }
  bind .wctx.lb <Motion> {
    %W selection clear 0 end
    %W selection set @%x,%y
  }
}

foreach {side syncCol} {A .txtB B .txtA C .txtC D .txtD} {
  set ln .ln$side
  text $ln
  $ln tag config - -justify right

  set txt .txt$side
  text $txt -width $CFG(WIDTH) -height $CFG(HEIGHT) -wrap none \
    -xscroll ".sbx$side set"
  catch {$txt config -tabstyle wordprocessor} ;# Required for Tk>=8.5
  foreach tag {add rm chng} {
    $txt tag config $tag -background $CFG([string toupper $tag]_BG)
    $txt tag lower $tag
  }
  $txt tag config fn -background $CFG(FN_BG) -foreground $CFG(FN_FG) \
    -justify center

Old version (lines 538-571):
    $w tag config search -background {#fcc000}
  }
  set ::search $w
}
::ttk::button .bb.quit -text {Quit} -command exit
::ttk::button .bb.search -text {Search} -command searchOnOff
pack .bb.quit -side left
if {[info exists filelist]} {
  pack .bb.filetag .bb.files -side left
}
pack .bb.ctxtag .bb.ctx -side left
pack .bb.search -side left
grid rowconfigure . 1 -weight 1
set rn 0
foreach {lnwid txtwid} [cols] {
  grid columnconfigure . $rn            -weight 1 -uniform a
  grid columnconfigure . [expr {$rn+1}] -weight 1 -uniform b
  incr rn 2
}
grid .bb -row 0 -columnspan 8
grid .nameA -row 1 -column 1 -sticky ew
grid .nameB -row 1 -column 3 -sticky ew
grid .nameC -row 1 -column 5 -sticky ew
grid .nameD -row 1 -column 7 -sticky ew
eval grid [cols] -row 2 -sticky nsew
grid .sby -row 2 -column 8 -sticky ns
grid .sbxA -row 3 -column 1 -sticky ew
grid .sbxB -row 3 -column 3 -sticky ew
grid .sbxC -row 3 -column 5 -sticky ew
grid .sbxD -row 3 -column 7 -sticky ew
readMerge



.spacer config -height [winfo height .sbxA]
wm deiconify .

New version (lines 562-593):
    $w tag config search -background {#fcc000}
  }
  set ::search $w
}
::ttk::button .bb.quit -text {Quit} -command exit
::ttk::button .bb.search -text {Search} -command searchOnOff
pack .bb.quit -side left
if {[winfo exists .bb.files]} {
  pack .bb.files -side left
}
pack .bb.ctxtag .bb.ctx -side left
pack .bb.search -side left
grid rowconfigure . 1 -weight 1 -minsize [winfo reqheight .nameA]


grid rowconfigure . 2 -weight 100

readMerge

grid .bb -row 0 -columnspan 8
grid .nameA -row 1 -column 1 -sticky ew
grid .nameB -row 1 -column 3 -sticky ew
grid .nameC -row 1 -column 5 -sticky ew
grid .nameD -row 1 -column 7 -sticky ew
eval grid [cols] -row 2 -sticky nsew
grid .sby -row 2 -column 8 -sticky ns
grid .sbxA -row 3 -column 1 -sticky ew
grid .sbxB -row 3 -column 3 -sticky ew
grid .sbxC -row 3 -column 5 -sticky ew
grid .sbxD -row 3 -column 7 -sticky ew
grid columnconfigure . {0 2 4 6} \
   -weight 1 -uniform a -minsize [winfo reqwidth .lnA]
grid columnconfigure . {1 3 5 7} -weight 100 -uniform b

.spacer config -height [winfo height .sbxA]
wm deiconify .

Changes to src/merge3.c.

Old version (lines 79-92):

/*
** Text of boundary markers for merge conflicts.
*/
static const char *const mergeMarker[] = {
 /*123456789 123456789 123456789 123456789 123456789 123456789 123456789*/
  "<<<<<<< BEGIN MERGE CONFLICT: local copy shown first <<<<<<<<<<<<",

  "||||||| COMMON ANCESTOR content follows |||||||||||||||||||||||||",
  "======= MERGED IN content follows ===============================",
  ">>>>>>> END MERGE CONFLICT >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
};

/*
** Return true if the input blob contains any CR/LF pairs on the first

New version (lines 79-93):

/*
** Text of boundary markers for merge conflicts.
*/
static const char *const mergeMarker[] = {
 /*123456789 123456789 123456789 123456789 123456789 123456789 123456789*/
  "<<<<<<< BEGIN MERGE CONFLICT: local copy shown first <<<<<<<<<<<<",
  "####### SUGGESTED CONFLICT RESOLUTION follows ###################",
  "||||||| COMMON ANCESTOR content follows |||||||||||||||||||||||||",
  "======= MERGED IN content follows ===============================",
  ">>>>>>> END MERGE CONFLICT >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
};

/*
** Return true if the input blob contains any CR/LF pairs on the first

Old version (lines 148-174):
  void (*xEnd)(MergeBuilder*);
  void (*xDestroy)(MergeBuilder*);
  const char *zPivot;        /* Label or name for the pivot */
  const char *zV1;           /* Label or name for the V1 file */
  const char *zV2;           /* Label or name for the V2 file */
  const char *zOut;          /* Label or name for the output */
  Blob *pPivot;              /* The common ancestor */
  Blob *pV1;                 /* First variant */
  Blob *pV2;                 /* Second variant */
  Blob *pOut;                /* Write merge results here */
  int useCrLf;               /* Use CRLF line endings */
  int nContext;              /* Size of unchanged line boundaries */
  unsigned int mxPivot;      /* Number of lines in the pivot */
  unsigned int mxV1;         /* Number of lines in V1 */
  unsigned int mxV2;         /* Number of lines in V2 */
  unsigned int lnPivot;      /* Lines read from pivot */
  unsigned int lnV1;         /* Lines read from v1 */
  unsigned int lnV2;         /* Lines read from v2 */
  unsigned int lnOut;        /* Lines written to out */
  unsigned int nConflict;    /* Number of conflicts seen */

};
#endif /* INTERFACE */


/************************* Generic MergeBuilder ******************************/
/* These are generic methods for MergeBuilder.  They just output debugging
** information.  But some of them are useful as base methods for other useful

New version (lines 149-176):
  void (*xEnd)(MergeBuilder*);
  void (*xDestroy)(MergeBuilder*);
  const char *zPivot;        /* Label or name for the pivot */
  const char *zV1;           /* Label or name for the V1 file */
  const char *zV2;           /* Label or name for the V2 file */
  const char *zOut;          /* Label or name for the output */
  Blob *pPivot;              /* The common ancestor */
  Blob *pV1;                 /* First variant (local copy) */
  Blob *pV2;                 /* Second variant (merged in) */
  Blob *pOut;                /* Write merge results here */
  int useCrLf;               /* Use CRLF line endings */
  int nContext;              /* Size of unchanged line boundaries */
  unsigned int mxPivot;      /* Number of lines in the pivot */
  unsigned int mxV1;         /* Number of lines in V1 */
  unsigned int mxV2;         /* Number of lines in V2 */
  unsigned int lnPivot;      /* Lines read from pivot */
  unsigned int lnV1;         /* Lines read from v1 */
  unsigned int lnV2;         /* Lines read from v2 */
  unsigned int lnOut;        /* Lines written to out */
  unsigned int nConflict;    /* Number of conflicts seen */
  u64 diffFlags;             /* Flags for difference engine */
};
#endif /* INTERFACE */


/************************* Generic MergeBuilder ******************************/
/* These are generic methods for MergeBuilder.  They just output debugging
** information.  But some of them are useful as base methods for other useful

Old version (lines 259-272):
  p->xChngV1 = dbgChngV1;
  p->xChngV2 = dbgChngV2;
  p->xChngBoth = dbgChngBoth;
  p->xConflict = dbgConflict;
  p->xEnd = dbgStartEnd;
  p->xDestroy = dbgDestroy;
}

/************************* MergeBuilderText **********************************/
/* This version of MergeBuilder actually performs a merge on file and puts
** the result in pOut
*/
static void txtStart(MergeBuilder *p){
  /* If both pV1 and pV2 start with a UTF-8 byte-order-mark (BOM),

New version (lines 261-364):
  p->xChngV1 = dbgChngV1;
  p->xChngV2 = dbgChngV2;
  p->xChngBoth = dbgChngBoth;
  p->xConflict = dbgConflict;
  p->xEnd = dbgStartEnd;
  p->xDestroy = dbgDestroy;
}

/************************* MergeBuilderToken ********************************/
/* This version of MergeBuilder actually performs a merge on file that
** are broken up into tokens instead of lines, and puts the result in pOut.
*/
static void tokenSame(MergeBuilder *p, unsigned int N){
  blob_append(p->pOut, p->pPivot->aData+p->pPivot->iCursor, N);
  p->pPivot->iCursor += N;
  p->pV1->iCursor += N;
  p->pV2->iCursor += N;
}
static void tokenChngV1(MergeBuilder *p, unsigned int nPivot, unsigned nV1){
  blob_append(p->pOut, p->pV1->aData+p->pV1->iCursor, nV1);
  p->pPivot->iCursor += nPivot;
  p->pV1->iCursor += nV1;
  p->pV2->iCursor += nPivot;
}
static void tokenChngV2(MergeBuilder *p, unsigned int nPivot, unsigned nV2){
  blob_append(p->pOut, p->pV2->aData+p->pV2->iCursor, nV2);
  p->pPivot->iCursor += nPivot;
  p->pV1->iCursor += nPivot;
  p->pV2->iCursor += nV2;
}
static void tokenChngBoth(MergeBuilder *p, unsigned int nPivot, unsigned nV){
  blob_append(p->pOut, p->pV2->aData+p->pV2->iCursor, nV);
  p->pPivot->iCursor += nPivot;
  p->pV1->iCursor += nV;
  p->pV2->iCursor += nV;
}
static void tokenConflict(
  MergeBuilder *p,
  unsigned int nPivot,
  unsigned int nV1,
  unsigned int nV2
){
  /* For a token-merge conflict, use the text from the merge-in */
  blob_append(p->pOut, p->pV2->aData+p->pV2->iCursor, nV2);
  p->pPivot->iCursor += nPivot;
  p->pV1->iCursor += nV1;
  p->pV2->iCursor += nV2;
}
static void mergebuilder_init_token(MergeBuilder *p){
  mergebuilder_init(p);
  p->xSame = tokenSame;
  p->xChngV1 = tokenChngV1;
  p->xChngV2 = tokenChngV2;
  p->xChngBoth = tokenChngBoth;
  p->xConflict = tokenConflict;
  p->diffFlags = DIFF_BY_TOKEN;
}

/*
** Attempt to do a low-level merge on a conflict.  The conflict is
** described by the first four parameters, which are the same as the
** arguments to the xConflict method of the MergeBuilder object.
** This routine attempts to resolve the conflict by looking at
** elements of the conflict region that are finer grain than complete
** lines of text.
**
** The result is written into Blob pOut.  pOut is initialized by this
** routine.
*/
int merge_try_to_resolve_conflict(
  MergeBuilder *pMB,     /* MergeBuilder that encounter conflict */
  unsigned int nPivot,   /* Lines of conflict in the pivot */
  unsigned int nV1,      /* Lines of conflict in V1 */
  unsigned int nV2,      /* Lines of conflict in V2 */
  Blob *pOut             /* Write resolution text here */
){
  int nConflict;
  MergeBuilder mb;
  Blob pv, v1, v2;
  mergebuilder_init_token(&mb);
  blob_extract_lines(pMB->pPivot, nPivot, &pv);
  blob_extract_lines(pMB->pV1, nV1, &v1);
  blob_extract_lines(pMB->pV2, nV2, &v2);
  blob_zero(pOut);
  mb.pPivot = &pv;
  mb.pV1 = &v1;
  mb.pV2 = &v2;
  mb.pOut = pOut;
  nConflict = merge_three_blobs(&mb);
  /* The pv, v1, and v2 blobs are all ephemeral and hence do not need
  ** to be freed. */
  /* mb has not allocated any resources, so we do not need to invoke
  ** the xDestroy method. */
  blob_add_final_newline(pOut);
  return nConflict;
}


/************************* MergeBuilderText **********************************/
/* This version of MergeBuilder actually performs a merge on file and puts
** the result in pOut
*/
static void txtStart(MergeBuilder *p){
  /* If both pV1 and pV2 start with a UTF-8 byte-order-mark (BOM),

Old version (lines 303-326):
}
static void txtConflict(
  MergeBuilder *p,
  unsigned int nPivot,
  unsigned int nV1,
  unsigned int nV2
){






  append_merge_mark(p->pOut, 0, p->lnV1, p->useCrLf);
  blob_copy_lines(p->pOut, p->pV1, nV1);         p->lnV1 += nV1;


  append_merge_mark(p->pOut, 1, p->lnPivot, p->useCrLf);




  blob_copy_lines(p->pOut, p->pPivot, nPivot);   p->lnPivot += nPivot;

  append_merge_mark(p->pOut, 2, p->lnV2, p->useCrLf);
  blob_copy_lines(p->pOut, p->pV2, nV2);         p->lnV2 += nV2;

  append_merge_mark(p->pOut, 3, -1, p->useCrLf);
}
static void mergebuilder_init_text(MergeBuilder *p){
  mergebuilder_init(p);
  p->xStart = txtStart;
  p->xSame = txtSame;
  p->xChngV1 = txtChngV1;
  p->xChngV2 = txtChngV2;

New version (lines 395-429):
}
static void txtConflict(
  MergeBuilder *p,
  unsigned int nPivot,
  unsigned int nV1,
  unsigned int nV2
){
  int nRes;   /* Lines in the computed conflict resolution */
  Blob res;   /* Text of the conflict resolution */
  
  merge_try_to_resolve_conflict(p, nPivot, nV1, nV2, &res);
  nRes = blob_linecount(&res);

  append_merge_mark(p->pOut, 0, p->lnV1+1, p->useCrLf);
  blob_copy_lines(p->pOut, p->pV1, nV1);         p->lnV1 += nV1;

  if( nRes>0 ){
    append_merge_mark(p->pOut, 1, 0, p->useCrLf);
    blob_copy_lines(p->pOut, &res, nRes);
  }

  append_merge_mark(p->pOut, 2, p->lnPivot+1, p->useCrLf);
  blob_copy_lines(p->pOut, p->pPivot, nPivot);   p->lnPivot += nPivot;

  append_merge_mark(p->pOut, 3, p->lnV2+1, p->useCrLf);
  blob_copy_lines(p->pOut, p->pV2, nV2);         p->lnV2 += nV2;

  append_merge_mark(p->pOut, 4, -1, p->useCrLf);
}
static void mergebuilder_init_text(MergeBuilder *p){
  mergebuilder_init(p);
  p->xStart = txtStart;
  p->xSame = txtSame;
  p->xChngV1 = txtChngV1;
  p->xChngV2 = txtChngV2;

Old version (lines 336-352):
** respectively.  The first character of each token provides auxiliary
** information:
**
**     .     This line is omitted.
**     N     Name of the file.
**     T     Literal text follows that should have a \n terminator.
**     R     Literal text follows that needs a \r\n terminator.
**     X     Merge conflict.  (Column 4 only)
**     Z     Literal text without a line terminator.
**     S     Skipped lines in all 4 files.
**     1     Text is a copy of token 1
**     2     Use data from data-token 2
**     3     Use data from data-token 3
*/

/* Write text that goes into the interior of a double-quoted string in TCL */
static void tclWriteQuotedText(Blob *pOut, const char *zIn, int nIn){

New version (lines 439-455):
** respectively.  The first character of each token provides auxiliary
** information:
**
**     .     This line is omitted.
**     N     Name of the file.
**     T     Literal text follows that should have a \n terminator.
**     R     Literal text follows that needs a \r\n terminator.
**     X     Merge conflict.
**     Z     Literal text without a line terminator.
**     S     Skipped lines.  Followed by number of lines to skip.
**     1     Text is a copy of token 1
**     2     Use data from data-token 2
**     3     Use data from data-token 3
*/

/* Write text that goes into the interior of a double-quoted string in TCL */
static void tclWriteQuotedText(Blob *pOut, const char *zIn, int nIn){

Old version (lines 368-396):
    }else{
      blob_append_char(pOut, c);
    }
  }
}

/* Copy one line of text from pIn and append to pOut, encoded as TCL */
static void tclLineOfText(Blob *pOut, Blob *pIn){
  int i, k;
  for(i=pIn->iCursor; i<pIn->nUsed && pIn->aData[i]!='\n'; i++){}
  if( i==pIn->nUsed ){
    blob_append(pOut, "\"Z", 2);
    k = i;
  }else if( i>pIn->iCursor && pIn->aData[i-1]=='\r' ){
    blob_append(pOut, "\"R", 2);
    k = i-1;
    i++;
  }else{
    blob_append(pOut, "\"T", 2);
    k = i;
    i++;
  }


  tclWriteQuotedText(pOut, pIn->aData+pIn->iCursor, k-pIn->iCursor);
  pIn->iCursor = i;
  blob_append_char(pOut, '"');
}
static void tclStart(MergeBuilder *p){
  Blob *pOut = p->pOut;
  blob_append(pOut, "\"N", 2);

New version (lines 471-498):
    }else{
      blob_append_char(pOut, c);
    }
  }
}

/* Copy one line of text from pIn and append to pOut, encoded as TCL */
static void tclLineOfText(Blob *pOut, Blob *pIn, char cType){
  int i, k;
  for(i=pIn->iCursor; i<pIn->nUsed && pIn->aData[i]!='\n'; i++){}
  if( i==pIn->nUsed ){

    k = i;
  }else if( i>pIn->iCursor && pIn->aData[i-1]=='\r' ){

    k = i-1;
    i++;
  }else{

    k = i;
    i++;
  }
  blob_append_char(pOut, '"');
  blob_append_char(pOut, cType);
  tclWriteQuotedText(pOut, pIn->aData+pIn->iCursor, k-pIn->iCursor);
  pIn->iCursor = i;
  blob_append_char(pOut, '"');
}
static void tclStart(MergeBuilder *p){
  Blob *pOut = p->pOut;
  blob_append(pOut, "\"N", 2);

Old version (lines 409-553):
}
static void tclSame(MergeBuilder *p, unsigned int N){
  int i = 0;
  int nSkip;

  if( p->lnPivot>=2 || p->lnV1>2 || p->lnV2>2 ){
    while( i<N && i<p->nContext ){
      tclLineOfText(p->pOut, p->pPivot);
      blob_append(p->pOut, " 1 1 1\n", 7);
      i++;
    }
    nSkip = N - p->nContext*2;
  }else{
    nSkip = N - p->nContext;
  }
  if( nSkip>0 ){
    blob_appendf(p->pOut, "S%d . . .\n", nSkip);

    blob_copy_lines(0, p->pPivot, nSkip);
    i += nSkip;
  }

  p->lnPivot += N;
  p->lnV1 += N;
  p->lnV2 += N;

  if( p->lnPivot<p->mxPivot || p->lnV1<p->mxV1 || p->lnV2<p->mxV2 ){
    while( i<N ){
      tclLineOfText(p->pOut, p->pPivot);
      blob_append(p->pOut, " 1 1 1\n", 7);
      i++;
    }
  }

  blob_copy_lines(0, p->pV1, N);
  blob_copy_lines(0, p->pV2, N);
}
static void tclChngV1(MergeBuilder *p, unsigned int nPivot, unsigned int nV1){
  int i;
  for(i=0; i<nPivot && i<nV1; i++){
    tclLineOfText(p->pOut, p->pPivot);
    blob_append_char(p->pOut, ' ');
    tclLineOfText(p->pOut, p->pV1);
    blob_append(p->pOut, " 1 2\n", 5);
  }
  while( i<nPivot ){
    tclLineOfText(p->pOut, p->pPivot);
    blob_append(p->pOut, " . 1 .\n", 7);
    i++;
  }
  while( i<nV1 ){
    blob_append(p->pOut, ". ", 2);
    tclLineOfText(p->pOut, p->pV1);
    blob_append(p->pOut, " . 2\n", 5);
    i++;
  }
  p->lnPivot += nPivot;
  p->lnV1 += nV1;
  p->lnV2 += nPivot;
  blob_copy_lines(0, p->pV2, nPivot);
}
static void tclChngV2(MergeBuilder *p, unsigned int nPivot, unsigned int nV2){
  int i;
  for(i=0; i<nPivot && i<nV2; i++){
    tclLineOfText(p->pOut, p->pPivot);
    blob_append(p->pOut, " 1 ", 3);
    tclLineOfText(p->pOut, p->pV2);
    blob_append(p->pOut, " 3\n", 3);
  }
  while( i<nPivot ){
    tclLineOfText(p->pOut, p->pPivot);
    blob_append(p->pOut, " 1 . .\n", 7);
    i++;
  }
  while( i<nV2 ){
    blob_append(p->pOut, ". . ", 4);
    tclLineOfText(p->pOut, p->pV2);
    blob_append(p->pOut, " 3\n", 3);
    i++;
  }
  p->lnPivot += nPivot;
  p->lnV1 += nPivot;
  p->lnV2 += nV2;
  blob_copy_lines(0, p->pV1, nPivot);
}
static void tclChngBoth(MergeBuilder *p, unsigned int nPivot, unsigned int nV){
  int i;
  for(i=0; i<nPivot && i<nV; i++){
    tclLineOfText(p->pOut, p->pPivot);
    blob_append_char(p->pOut, ' ');
    tclLineOfText(p->pOut, p->pV1);
    blob_append(p->pOut, " 2 2\n", 5);
  }
  while( i<nPivot ){
    tclLineOfText(p->pOut, p->pPivot);
    blob_append(p->pOut, " . . .\n", 7);
    i++;
  }
  while( i<nV ){
    blob_append(p->pOut, ". ", 2);
    tclLineOfText(p->pOut, p->pV1);
    blob_append(p->pOut, " 2 2\n", 5);
    i++;
  }
  p->lnPivot += nPivot;
  p->lnV1 += nV;
  p->lnV2 += nV;
  blob_copy_lines(0, p->pV2, nV);
}
static void tclConflict(
  MergeBuilder *p,
  unsigned int nPivot,
  unsigned int nV1,
  unsigned int nV2
){
  int mx = nPivot;
  int i;





  if( nV1>mx ) mx = nV1;
  if( nV2>mx ) mx = nV2;




  for(i=0; i<mx; i++){
    if( i<nPivot ){
      tclLineOfText(p->pOut, p->pPivot);
    }else{
      blob_append_char(p->pOut, '.');
    }
    blob_append_char(p->pOut, ' ');
    if( i<nV1 ){
      tclLineOfText(p->pOut, p->pV1);
    }else{
      blob_append_char(p->pOut, '.');
    }
    blob_append_char(p->pOut, ' ');
    if( i<nV2 ){
      tclLineOfText(p->pOut, p->pV2);
    }else{
      blob_append_char(p->pOut, '.');
    }





    blob_append(p->pOut, " X\n", 3);
  }





  p->lnPivot += nPivot;
  p->lnV1 += nV1;
  p->lnV2 += nV2;
}
void mergebuilder_init_tcl(MergeBuilder *p){
  mergebuilder_init(p);
  p->xStart = tclStart;







|








|
>










|











|

|



|





|











|

|



|





|











|

|



|





|
















>
>
>
>
>


>
>
>
>


|





|





|



>
>
>
>
>
|
|
>
>
>
>
>







511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
}
static void tclSame(MergeBuilder *p, unsigned int N){
  int i = 0;
  int nSkip;

  if( p->lnPivot>=2 || p->lnV1>2 || p->lnV2>2 ){
    while( i<N && i<p->nContext ){
      tclLineOfText(p->pOut, p->pPivot, 'T');
      blob_append(p->pOut, " 1 1 1\n", 7);
      i++;
    }
    nSkip = N - p->nContext*2;
  }else{
    nSkip = N - p->nContext;
  }
  if( nSkip>0 ){
    blob_appendf(p->pOut, "\"S%d %d %d %d\" . . .\n",
                 nSkip, nSkip, nSkip, nSkip);
    blob_copy_lines(0, p->pPivot, nSkip);
    i += nSkip;
  }

  p->lnPivot += N;
  p->lnV1 += N;
  p->lnV2 += N;

  if( p->lnPivot<p->mxPivot || p->lnV1<p->mxV1 || p->lnV2<p->mxV2 ){
    while( i<N ){
      tclLineOfText(p->pOut, p->pPivot, 'T');
      blob_append(p->pOut, " 1 1 1\n", 7);
      i++;
    }
  }

  blob_copy_lines(0, p->pV1, N);
  blob_copy_lines(0, p->pV2, N);
}
static void tclChngV1(MergeBuilder *p, unsigned int nPivot, unsigned int nV1){
  int i;
  for(i=0; i<nPivot && i<nV1; i++){
    tclLineOfText(p->pOut, p->pPivot, 'T');
    blob_append_char(p->pOut, ' ');
    tclLineOfText(p->pOut, p->pV1, 'T');
    blob_append(p->pOut, " 1 2\n", 5);
  }
  while( i<nPivot ){
    tclLineOfText(p->pOut, p->pPivot, 'T');
    blob_append(p->pOut, " . 1 .\n", 7);
    i++;
  }
  while( i<nV1 ){
    blob_append(p->pOut, ". ", 2);
    tclLineOfText(p->pOut, p->pV1, 'T');
    blob_append(p->pOut, " . 2\n", 5);
    i++;
  }
  p->lnPivot += nPivot;
  p->lnV1 += nV1;
  p->lnV2 += nPivot;
  blob_copy_lines(0, p->pV2, nPivot);
}
static void tclChngV2(MergeBuilder *p, unsigned int nPivot, unsigned int nV2){
  int i;
  for(i=0; i<nPivot && i<nV2; i++){
    tclLineOfText(p->pOut, p->pPivot, 'T');
    blob_append(p->pOut, " 1 ", 3);
    tclLineOfText(p->pOut, p->pV2, 'T');
    blob_append(p->pOut, " 3\n", 3);
  }
  while( i<nPivot ){
    tclLineOfText(p->pOut, p->pPivot, 'T');
    blob_append(p->pOut, " 1 . .\n", 7);
    i++;
  }
  while( i<nV2 ){
    blob_append(p->pOut, ". . ", 4);
    tclLineOfText(p->pOut, p->pV2, 'T');
    blob_append(p->pOut, " 3\n", 3);
    i++;
  }
  p->lnPivot += nPivot;
  p->lnV1 += nPivot;
  p->lnV2 += nV2;
  blob_copy_lines(0, p->pV1, nPivot);
}
static void tclChngBoth(MergeBuilder *p, unsigned int nPivot, unsigned int nV){
  int i;
  for(i=0; i<nPivot && i<nV; i++){
    tclLineOfText(p->pOut, p->pPivot, 'T');
    blob_append_char(p->pOut, ' ');
    tclLineOfText(p->pOut, p->pV1, 'T');
    blob_append(p->pOut, " 2 2\n", 5);
  }
  while( i<nPivot ){
    tclLineOfText(p->pOut, p->pPivot, 'T');
    blob_append(p->pOut, " . . .\n", 7);
    i++;
  }
  while( i<nV ){
    blob_append(p->pOut, ". ", 2);
    tclLineOfText(p->pOut, p->pV1, 'T');
    blob_append(p->pOut, " 2 2\n", 5);
    i++;
  }
  p->lnPivot += nPivot;
  p->lnV1 += nV;
  p->lnV2 += nV;
  blob_copy_lines(0, p->pV2, nV);
}
static void tclConflict(
  MergeBuilder *p,
  unsigned int nPivot,
  unsigned int nV1,
  unsigned int nV2
){
  int mx = nPivot;
  int i;
  int nRes;
  Blob res;
  
  merge_try_to_resolve_conflict(p, nPivot, nV1, nV2, &res);
  nRes = blob_linecount(&res);
  if( nV1>mx ) mx = nV1;
  if( nV2>mx ) mx = nV2;
  if( nRes>mx ) mx = nRes;
  if( nRes>0 ){
    blob_appendf(p->pOut, "\"S0 0 0 %d\" . . .\n", nV2+2);
  }
  for(i=0; i<mx; i++){
    if( i<nPivot ){
      tclLineOfText(p->pOut, p->pPivot, 'X');
    }else{
      blob_append_char(p->pOut, '.');
    }
    blob_append_char(p->pOut, ' ');
    if( i<nV1 ){
      tclLineOfText(p->pOut, p->pV1, 'X');
    }else{
      blob_append_char(p->pOut, '.');
    }
    blob_append_char(p->pOut, ' ');
    if( i<nV2 ){
      tclLineOfText(p->pOut, p->pV2, 'X');
    }else{
      blob_append_char(p->pOut, '.');
    }
    if( i<nRes ){
      blob_append_char(p->pOut, ' ');
      tclLineOfText(p->pOut, &res, 'X');
      blob_append_char(p->pOut, '\n');
    }else{
      blob_append(p->pOut, " .\n", 3);
    }
    if( i==mx-1 ){
      blob_appendf(p->pOut, "\"S0 0 0 %d\" . . .\n", nPivot+nV1+3);
    }
  }
  blob_reset(&res);
  p->lnPivot += nPivot;
  p->lnV1 += nV1;
  p->lnV2 += nV2;
}
void mergebuilder_init_tcl(MergeBuilder *p){
  mergebuilder_init(p);
  p->xStart = tclStart;
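
Reading the callbacks above, each emitted record is one line describing the four display columns of the TK-based viewer (pivot, V1, V2, suggested result): a quoted word carries literal text, "." marks an empty cell, a bare digit appears to mean "repeat the line already shown in another column", and "S" records give the number of lines to skip in each column. That reading, and the sample records printed below, are illustrative rather than documented behavior.

#include <stdio.h>

/* Illustrative only: the general shape of the records emitted by the tcl*
** callbacks above for the four display columns (pivot, V1, V2, suggested
** result).  Treating the bare numeric codes as "repeat the line shown in
** another column" is an inference from the code, not documented behavior. */
int main(void){
  printf("\"Tshared line of text\" 1 1 1\n");        /* unchanged context line */
  printf("\"S12 12 12 12\" . . .\n");                /* 12 identical lines skipped */
  printf("\"Tpivot text\" \"Tedited text\" 1 2\n");  /* change on the V1 side only */
  printf("\"Xpivot\" \"Xv1 edit\" \"Xv2 edit\" \"Xsuggested\"\n"); /* conflict row */
  return 0;
}
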
635
636
637
638
639
640
641

642
643
644
645
646
647
648
  ** an array of integer triples.  Within each triple, the first integer
  ** is the number of lines of text to copy directly from the pivot,
  ** the second integer is the number of lines of text to omit from the
  ** pivot, and the third integer is the number of lines of text that are
  ** inserted.  The edit array ends with a triple of 0,0,0.
  */
  diff_config_init(&DCfg, 0);

  aC1 = text_diff(p->pPivot, p->pV1, 0, &DCfg);
  aC2 = text_diff(p->pPivot, p->pV2, 0, &DCfg);
  if( aC1==0 || aC2==0 ){
    free(aC1);
    free(aC2);
    return -1;
  }







>







757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
  ** an array of integer triples.  Within each triple, the first integer
  ** is the number of lines of text to copy directly from the pivot,
  ** the second integer is the number of lines of text to omit from the
  ** pivot, and the third integer is the number of lines of text that are
  ** inserted.  The edit array ends with a triple of 0,0,0.
  */
  diff_config_init(&DCfg, 0);
  DCfg.diffFlags = p->diffFlags;
  aC1 = text_diff(p->pPivot, p->pV1, 0, &DCfg);
  aC2 = text_diff(p->pPivot, p->pV2, 0, &DCfg);
  if( aC1==0 || aC2==0 ){
    free(aC1);
    free(aC2);
    return -1;
  }
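
The comment above describes the edit array returned by text_diff() as (copy, delete, insert) integer triples terminated by 0,0,0. A small self-contained sketch of walking such an array, with invented sample values:

#include <stdio.h>

/* Minimal sketch: walk a (copy, delete, insert) edit array of the kind
** described in the comment above.  The sample triples are invented for
** illustration; real arrays come from text_diff(). */
int main(void){
  static const int aC[] = { 5,0,2,  10,3,1,  7,1,0,  0,0,0 };
  int i, nPivot = 0, nResult = 0;
  for(i=0; aC[i] || aC[i+1] || aC[i+2]; i+=3){
    nPivot  += aC[i] + aC[i+1];   /* lines consumed from the pivot          */
    nResult += aC[i] + aC[i+2];   /* lines that appear in the derived file  */
  }
  printf("pivot lines covered: %d, derived-file lines: %d\n", nPivot, nResult);
  return 0;
}
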
756
757
758
759
760
761
762

763
764
765
766
767
768
769
770
  int i, j;
  int len = (int)strlen(mergeMarker[0]);
  const char *z = blob_buffer(p);
  int n = blob_size(p) - len + 1;
  assert( len==(int)strlen(mergeMarker[1]) );
  assert( len==(int)strlen(mergeMarker[2]) );
  assert( len==(int)strlen(mergeMarker[3]) );

  assert( count(mergeMarker)==4 );
  for(i=0; i<n; ){
    for(j=0; j<4; j++){
      if( (memcmp(&z[i], mergeMarker[j], len)==0) ){
        return 1;
      }
    }
    while( i<n && z[i]!='\n' ){ i++; }







>
|







879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
  int i, j;
  int len = (int)strlen(mergeMarker[0]);
  const char *z = blob_buffer(p);
  int n = blob_size(p) - len + 1;
  assert( len==(int)strlen(mergeMarker[1]) );
  assert( len==(int)strlen(mergeMarker[2]) );
  assert( len==(int)strlen(mergeMarker[3]) );
  assert( len==(int)strlen(mergeMarker[4]) );
  assert( count(mergeMarker)==5 );
  for(i=0; i<n; ){
    for(j=0; j<4; j++){
      if( (memcmp(&z[i], mergeMarker[j], len)==0) ){
        return 1;
      }
    }
    while( i<n && z[i]!='\n' ){ i++; }
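
The scan above tests the start of every line against the merge-marker strings. A self-contained sketch of the same idea follows; the marker prefixes used here are stand-ins rather than Fossil's actual mergeMarker[] text.

#include <stdio.h>
#include <string.h>

/* Minimal sketch of a merge-marker scan: report whether any line of a
** NUL-terminated buffer begins with one of the marker prefixes.  The
** prefixes below are stand-ins, not Fossil's exact mergeMarker[] strings. */
static const char *azMarker[] = {
  "<<<<<<< BEGIN MERGE CONFLICT",
  "||||||| COMMON ANCESTOR",
  "======= MERGED IN",
  ">>>>>>> END MERGE CONFLICT",
};

static int has_merge_marker(const char *z){
  size_t j;
  while( *z ){
    for(j=0; j<sizeof(azMarker)/sizeof(azMarker[0]); j++){
      if( strncmp(z, azMarker[j], strlen(azMarker[j]))==0 ) return 1;
    }
    while( *z && *z!='\n' ) z++;          /* skip to the end of this line */
    if( *z ) z++;                         /* step past the newline        */
  }
  return 0;
}

int main(void){
  printf("%d\n", has_merge_marker("ok\n<<<<<<< BEGIN MERGE CONFLICT x\n"));
  return 0;
}
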
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
  return blob_str(&x);
}

#if INTERFACE
/*
** Flags to the 3-way merger
*/
#define MERGE_DRYRUN  0x0001
/*
** The MERGE_KEEP_FILES flag specifies that merge_3way() should retain
** its temporary files on error. By default they are removed after the
** merge, regardless of success or failure.
*/
#define MERGE_KEEP_FILES 0x0002
#endif


/*
** This routine is a wrapper around merge_three_blobs() with the following
** enhancements:
**







|





|







1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
  return blob_str(&x);
}

#if INTERFACE
/*
** Flags to the 3-way merger
*/
#define MERGE_DRYRUN          0x0001
/*
** The MERGE_KEEP_FILES flag specifies that merge_3way() should retain
** its temporary files on error. By default they are removed after the
** merge, regardless of success or failure.
*/
#define MERGE_KEEP_FILES      0x0002
#endif


/*
** This routine is a wrapper around merge_three_blobs() with the following
** enhancements:
**
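
The two flags above are bitmask values that callers OR together. A minimal standalone illustration follows; the defines are repeated so the sketch compiles on its own, and merge_3way() itself is not called since its signature lies outside this hunk.

#include <stdio.h>

/* Illustrative only: combining the 3-way-merge flags defined above. */
#define MERGE_DRYRUN          0x0001
#define MERGE_KEEP_FILES      0x0002

int main(void){
  unsigned mergeFlags = MERGE_DRYRUN | MERGE_KEEP_FILES;
  printf("dry-run: %s, keep temp files: %s\n",
         (mergeFlags & MERGE_DRYRUN) ? "yes" : "no",
         (mergeFlags & MERGE_KEEP_FILES) ? "yes" : "no");
  return 0;
}
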