
Overview
Comment: Merge trunk
SHA1: 6164dac54c3a135fd76b5d06d2b8b58139dd840b
User & Date: andygoth 2016-05-23 15:37:18
Context
2016-06-10 14:43  Enhancements to SVN import  (check-in: fa9ead30 user: drh tags: trunk)
2016-05-23 15:37  Merge trunk  (Closed-Leaf check-in: 6164dac5 user: andygoth tags: andygoth-import)
2016-05-23 01:05  Add brief documentation for compiling and using encrypted repositories.  (check-in: 893905c8 user: drh tags: trunk)
2016-05-14 13:10  Ignore Subversion property changes when importing.  Property changes were previously interpreted as changing a file to be empty.  (check-in: ed7905b6 user: andygoth tags: andygoth-import)
Changes

Changes to src/attach.c.

    rid = content_put(pAttach);
    db_multi_exec("INSERT OR IGNORE INTO unsent VALUES(%d);", rid);
    db_multi_exec("INSERT OR IGNORE INTO unclustered VALUES(%d);", rid);
  }
  manifest_crosslink(rid, pAttach, MC_NONE);
}


/*
** WEBPAGE: attachadd
** Add a new attachment.
**
**    tkt=TICKETUUID
**    page=WIKIPAGE
................................................................................
    }
    if( !db_exists("SELECT 1 FROM tag WHERE tagname='event-%q'", zTechNote) ){
      zTechNote = db_text(0, "SELECT substr(tagname,7) FROM tag"
                             " WHERE tagname GLOB 'event-%q*'", zTechNote);
      if( zTechNote==0) fossil_redirect_home();
    }
    zTarget = zTechNote;
    zTargetType = mprintf("Tech Note <a href=\"%R/technote/%h\">%h</a>",
                           zTechNote, zTechNote);
  
  }else{
    if( g.perm.ApndTkt==0 || g.perm.Attach==0 ){
      login_needed(g.anon.ApndTkt && g.anon.Attach);
      return;
    }
................................................................................
                          zTkt, zTkt);
  }
  if( zFrom==0 ) zFrom = mprintf("%s/home", g.zTop);
  if( P("cancel") ){
    cgi_redirect(zFrom);
  }
  if( P("ok") && szContent>0 && (goodCaptcha = captcha_is_correct()) ){
    Blob content;
    Blob manifest;
    Blob cksum;
    char *zUUID;
    const char *zComment;
    char *zDate;
    int rid;
    int i, n;
    int addCompress = 0;
    Manifest *pManifest;
    int needModerator;

    db_begin_transaction();
    blob_init(&content, aContent, szContent);
    pManifest = manifest_parse(&content, 0, 0);
    manifest_destroy(pManifest);
    blob_init(&content, aContent, szContent);
    if( pManifest ){
      blob_compress(&content, &content);
      addCompress = 1;
    }
    needModerator =
         (zTkt!=0 && ticket_need_moderation(0)) ||
         (zPage!=0 && wiki_need_moderation(0));
    rid = content_put_ex(&content, 0, 0, 0, needModerator);
    zUUID = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid);
    blob_zero(&manifest);
    for(i=n=0; zName[i]; i++){
      if( zName[i]=='/' || zName[i]=='\\' ) n = i;
    }
    zName += n;
    if( zName[0]==0 ) zName = "unknown";
    blob_appendf(&manifest, "A %F%s %F %s\n",
                 zName, addCompress ? ".gz" : "", zTarget, zUUID);
    zComment = PD("comment", "");
    while( fossil_isspace(zComment[0]) ) zComment++;
    n = strlen(zComment);
    while( n>0 && fossil_isspace(zComment[n-1]) ){ n--; }
    if( n>0 ){
      blob_appendf(&manifest, "C %#F\n", n, zComment);
    }
    zDate = date_in_standard_format("now");
    blob_appendf(&manifest, "D %s\n", zDate);
    blob_appendf(&manifest, "U %F\n", login_name());
    md5sum_blob(&manifest, &cksum);
    blob_appendf(&manifest, "Z %b\n", &cksum);
    attach_put(&manifest, rid, needModerator);
    assert( blob_is_reset(&manifest) );
    db_end_transaction(0);

    cgi_redirect(zFrom);
  }
  style_header("Add Attachment");
  if( !goodCaptcha ){
    @ <p class="generalError">Error: Incorrect security code.</p>
  }
  @ <h2>Add Attachment To %s(zTargetType)</h2>
................................................................................
  }
  if( cnt ){
    @ </ul>
  }
  db_finalize(&q);

}
    rid = content_put(pAttach);
    db_multi_exec("INSERT OR IGNORE INTO unsent VALUES(%d);", rid);
    db_multi_exec("INSERT OR IGNORE INTO unclustered VALUES(%d);", rid);
  }
  manifest_crosslink(rid, pAttach, MC_NONE);
}


/*
** Commit a new attachment into the repository
*/
void attach_commit(
  const char *zName,                   /* The filename of the attachment */
  const char *zTarget,                 /* The artifact uuid to attach to */
  const char *aContent,                /* The content of the attachment */
  int         szContent,               /* The length of the attachment */
  int         needModerator,           /* Moderate the attachment? */
  const char *zComment                 /* The comment for the attachment */
){
    Blob content;
    Blob manifest;
    Blob cksum;
    char *zUUID;
    char *zDate;
    int rid;
    int i, n;
    int addCompress = 0;
    Manifest *pManifest;

    db_begin_transaction();
    blob_init(&content, aContent, szContent);
    pManifest = manifest_parse(&content, 0, 0);
    manifest_destroy(pManifest);
    blob_init(&content, aContent, szContent);
    if( pManifest ){
      blob_compress(&content, &content);
      addCompress = 1;
    }
    rid = content_put_ex(&content, 0, 0, 0, needModerator);
    zUUID = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid);
    blob_zero(&manifest);
    for(i=n=0; zName[i]; i++){
      if( zName[i]=='/' || zName[i]=='\\' ) n = i+1;
    }
    zName += n;
    if( zName[0]==0 ) zName = "unknown";
    blob_appendf(&manifest, "A %F%s %F %s\n",
                 zName, addCompress ? ".gz" : "", zTarget, zUUID);
    while( fossil_isspace(zComment[0]) ) zComment++;
    n = strlen(zComment);
    while( n>0 && fossil_isspace(zComment[n-1]) ){ n--; }
    if( n>0 ){
      blob_appendf(&manifest, "C %#F\n", n, zComment);
    }
    zDate = date_in_standard_format("now");
    blob_appendf(&manifest, "D %s\n", zDate);
    blob_appendf(&manifest, "U %F\n", login_name());
    md5sum_blob(&manifest, &cksum);
    blob_appendf(&manifest, "Z %b\n", &cksum);
    attach_put(&manifest, rid, needModerator);
    assert( blob_is_reset(&manifest) );
    db_end_transaction(0);
}
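
/*
** For illustration, the attachment artifact that attach_commit() assembles
** above carries its cards in A, C (optional), D, U, Z order.  A hypothetical
** rendering (target name, uuids, checksum, and comment are invented):
**
**   A notes.txt MyWikiPage 0123456789abcdef0123456789abcdef01234567
**   C Initial\supload
**   D 2016-05-23T15:37:18.000
**   U andygoth
**   Z 8f9a0b1c2d3e4f5a6b7c8d9e0f1a2b3c
*/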

/*
** WEBPAGE: attachadd
** Add a new attachment.
**
**    tkt=TICKETUUID
**    page=WIKIPAGE
................................................................................
    }
    if( !db_exists("SELECT 1 FROM tag WHERE tagname='event-%q'", zTechNote) ){
      zTechNote = db_text(0, "SELECT substr(tagname,7) FROM tag"
                             " WHERE tagname GLOB 'event-%q*'", zTechNote);
      if( zTechNote==0) fossil_redirect_home();
    }
    zTarget = zTechNote;
    zTargetType = mprintf("Tech Note <a href=\"%R/technote/%s\">%S</a>",
                           zTechNote, zTechNote);
  
  }else{
    if( g.perm.ApndTkt==0 || g.perm.Attach==0 ){
      login_needed(g.anon.ApndTkt && g.anon.Attach);
      return;
    }
................................................................................
                          zTkt, zTkt);
  }
  if( zFrom==0 ) zFrom = mprintf("%s/home", g.zTop);
  if( P("cancel") ){
    cgi_redirect(zFrom);
  }
  if( P("ok") && szContent>0 && (goodCaptcha = captcha_is_correct()) ){
    int needModerator = (zTkt!=0 && ticket_need_moderation(0)) ||
                        (zPage!=0 && wiki_need_moderation(0));
    const char *zComment = PD("comment", "");
    attach_commit(zName, zTarget, aContent, szContent, needModerator, zComment);
    cgi_redirect(zFrom);
  }
  style_header("Add Attachment");
  if( !goodCaptcha ){
    @ <p class="generalError">Error: Incorrect security code.</p>
  }
  @ <h2>Add Attachment To %s(zTargetType)</h2>
................................................................................
  }
  if( cnt ){
    @ </ul>
  }
  db_finalize(&q);

}

/*
** COMMAND: attachment*
**
** Usage: %fossil attachment add ?PAGENAME? FILENAME ?OPTIONS?
**
**       Add an attachment to an existing wiki page or tech note.
**
**       Options:
**         -t|--technote DATETIME      Specifies the timestamp of
**                                     the technote to which the attachment
**                                     is to be made. The attachment will be
**                                     to the most recently modified tech note
**                                     with the specified timestamp.
**         -t|--technote TECHNOTE-ID   Specifies the technote to be
**                                     updated by its technote id.
**
**       One of PAGENAME, DATETIME or TECHNOTE-ID must be specified.
*/
void attachment_cmd(void){
  int n;
  db_find_and_open_repository(0, 0);
  if( g.argc<3 ){
    goto attachment_cmd_usage;
  }
  n = strlen(g.argv[2]);
  if( n==0 ){
    goto attachment_cmd_usage;
  }

  if( strncmp(g.argv[2],"add",n)==0 ){
    const char *zPageName;        /* Name of the wiki page to attach to */
    const char *zFile;            /* Name of the file to be attached */
    const char *zETime;           /* The name of the technote to attach to */
    Manifest *pWiki = 0;          /* Parsed wiki page content */
    char *zBody = 0;              /* Wiki page content */
    int rid;
    const char *zTarget;          /* Target of the attachment */
    Blob content;                 /* The content of the attachment */
    zETime = find_option("technote","t",1);
    if( !zETime ){
      if( g.argc!=5 ){
        usage("add PAGENAME FILENAME");
      }
      zPageName = g.argv[3];
      rid = db_int(0, "SELECT x.rid FROM tag t, tagxref x"
        " WHERE x.tagid=t.tagid AND t.tagname='wiki-%q'"
        " ORDER BY x.mtime DESC LIMIT 1",
        zPageName
      );        
      if( (pWiki = manifest_get(rid, CFTYPE_WIKI, 0))!=0 ){
        zBody = pWiki->zWiki;
      }
      if( zBody==0 ){
        fossil_fatal("wiki page [%s] not found",zPageName);
      }
      zTarget = zPageName;
      zFile = g.argv[4];
    }else{
      if( g.argc!=4 ){
        usage("add FILENAME --technote DATETIME|TECHNOTE-ID");
      }
      rid = wiki_technote_to_rid(zETime);
      if( rid<0 ){
        fossil_fatal("ambiguous tech note id: %s", zETime);
      }
      if( (pWiki = manifest_get(rid, CFTYPE_EVENT, 0))!=0 ){
        zBody = pWiki->zWiki;
      }
      if( zBody==0 ){
        fossil_fatal("technote [%s] not found",zETime);
      }
      zTarget = db_text(0,
        "SELECT substr(tagname,7) FROM tag WHERE tagid=(SELECT tagid FROM event WHERE objid='%d')",
        rid
      );
      zFile = g.argv[3];
    }
    blob_read_from_file(&content, zFile);
    user_select();
    attach_commit(
      zFile,                   /* The filename of the attachment */
      zTarget,                 /* The artifact uuid to attach to */
      blob_buffer(&content),   /* The content of the attachment */
      blob_size(&content),     /* The length of the attachment */
      0,                       /* No need to moderate the attachment */
      ""                       /* Empty attachment comment */
    );
    if( !zETime ){
      fossil_print("Attached %s to wiki page %s.\n", zFile, zPageName);
    }else{
      fossil_print("Attached %s to tech note %s.\n", zFile, zETime);
    }
  }else{
    goto attachment_cmd_usage;
  }
  return;

attachment_cmd_usage:
  usage("add ?PAGENAME? FILENAME [-t|--technote DATETIME ]");
}
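
The refactored path can be exercised end to end from a checkout; a
hypothetical session (page, file, and tech note identifiers invented):

    fossil attachment add download notes.txt
    fossil attachment add report.pdf --technote 2016-05-14

Both invocations, like the attachadd web page, now funnel through
attach_commit(), so the command line and the web UI produce identical
attachment artifacts.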

Changes to src/checkin.c.

}

/*
** COMMAND: extras
** 
** Usage: %fossil extras ?OPTIONS? ?PATH1 ...?
**
** Print a list of all files in the source tree that are not part of
** the current checkout.  See also the "clean" command. If paths are
** specified, only files in the given directories will be listed.
**
** Files and subdirectories whose names begin with "." are normally
** ignored but can be included by adding the --dotfiles option.
**
** The GLOBPATTERN is a comma-separated list of GLOB expressions for
** files that are ignored.  The GLOBPATTERN specified by the "ignore-glob"
** setting is used if the --ignore option is omitted.
**
** Pathnames are displayed according to the "relative-paths" setting,
** unless overridden by the --abs-paths or --rel-paths options.
**
** Options:
**    --abs-paths      Display absolute pathnames.
**    --case-sensitive <BOOL> override case-sensitive setting
................................................................................
}

/*
** COMMAND: clean
** 
** Usage: %fossil clean ?OPTIONS? ?PATH ...?
**
** Delete all "extra" files in the source tree.  "Extra" files are
** files that are not officially part of the checkout. This operation
** cannot be undone. If one or more PATH arguments appear, then only
** the files named, or files contained within directories named, will be
** removed.
**
** Prompts are issued to confirm the removal of each file, unless
** the --force flag is used or unless the file matches glob patterns
** specified by the --clean option.  No file that matches glob patterns
** specified by --ignore or --keep will ever be deleted. The default
** values for --clean, --ignore, and --keep are determined by the
** (versionable) clean-glob, ignore-glob, and keep-glob settings.
** Files and subdirectories whose names begin with "." are automatically
** ignored unless the --dotfiles option is used.
**
** The --verily option ignores the keep-glob and ignore-glob settings
** and turns on --force, --dotfiles, and --emptydirs.  Use the --verily
** option when you really want to clean up everything.  Extreme care
** should be exercised when using the --verily option.
**
** Options:
**    --allckouts      Check for empty directories within any checkouts
**                     that may be nested within the current one.  This
**                     option should be used with great care because the
**                     empty-dirs setting (and other applicable settings)
**                     belonging to the other repositories, if any, will
................................................................................
**                     explicitly exempted via the empty-dirs setting
**                     or another applicable setting or command line
**                     argument.  Matching files, if any, are removed
**                     prior to checking for any empty directories;
**                     therefore, directories that contain only files
**                     that were removed will be removed as well.
**    -f|--force       Remove files without prompting.
**    -i|--prompt      Prompt before removing each file.
**    -x|--verily      WARNING: Removes everything that is not a managed
**                     file or the repository itself.  This option
**                     implies the --force, --emptydirs, --dotfiles, and
**                     --disable-undo options.  Furthermore, it completely
**                     disregards the keep-glob and ignore-glob settings.
**                     However, it does honor the --ignore and --keep
**                     options.
}

/*
** COMMAND: extras
** 
** Usage: %fossil extras ?OPTIONS? ?PATH1 ...?
**
** Print a list of all files in the source tree that are not part of the
** current checkout. See also the "clean" command. If paths are specified,
** only files in the given directories will be listed.
**
** Files and subdirectories whose names begin with "." are normally
** ignored but can be included by adding the --dotfiles option.
**
** Files whose names match any of the glob patterns in the "ignore-glob"
** setting are ignored. This setting can be overridden by the --ignore
** option, whose CSG argument is a comma-separated list of glob patterns.
**
** Pathnames are displayed according to the "relative-paths" setting,
** unless overridden by the --abs-paths or --rel-paths options.
**
** Options:
**    --abs-paths      Display absolute pathnames.
**    --case-sensitive <BOOL> override case-sensitive setting
................................................................................
}

/*
** COMMAND: clean
** 
** Usage: %fossil clean ?OPTIONS? ?PATH ...?
**
** Delete all "extra" files in the source tree.  "Extra" files are files
** that are not officially part of the checkout.  If one or more PATH
** arguments appear, then only the files named, or files contained within
** directories named, will be removed.
**
** If the --prompt option is used, prompts are issued to confirm the
** permanent removal of each file.  Otherwise, files are backed up to the
** undo buffer prior to removal, and prompts are issued only for files
** whose removal cannot be undone due to their large size or due to
** --disable-undo being used.
** 
** The --force option treats all prompts as having been answered yes,
** whereas --no-prompt treats them as having been answered no.
** 
** Files matching any glob pattern specified by the --clean option are
** deleted without prompting, and the removal cannot be undone.
**
** No file that matches glob patterns specified by --ignore or --keep will
** ever be deleted.  Files and subdirectories whose names begin with "."
** are automatically ignored unless the --dotfiles option is used.
** 
** The default values for --clean, --ignore, and --keep are determined by
** the (versionable) clean-glob, ignore-glob, and keep-glob settings.
**
** The --verily option ignores the keep-glob and ignore-glob settings and
** turns on --force, --emptydirs, --dotfiles, and --disable-undo.  Use the
** --verily option when you really want to clean up everything.  Extreme
** care should be exercised when using the --verily option.
**
** Options:
**    --allckouts      Check for empty directories within any checkouts
**                     that may be nested within the current one.  This
**                     option should be used with great care because the
**                     empty-dirs setting (and other applicable settings)
**                     belonging to the other repositories, if any, will
................................................................................
**                     explicitly exempted via the empty-dirs setting
**                     or another applicable setting or command line
**                     argument.  Matching files, if any, are removed
**                     prior to checking for any empty directories;
**                     therefore, directories that contain only files
**                     that were removed will be removed as well.
**    -f|--force       Remove files without prompting.
**    -i|--prompt      Prompt before removing each file.  This option
**                     implies the --disable-undo option.
**    -x|--verily      WARNING: Removes everything that is not a managed
**                     file or the repository itself.  This option
**                     implies the --force, --emptydirs, --dotfiles, and
**                     --disable-undo options.  Furthermore, it completely
**                     disregards the keep-glob and ignore-glob settings.
**                     However, it does honor the --ignore and --keep
**                     options.
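
Under the revised semantics a cautious cleanup might combine the glob
options; a hypothetical invocation (glob values invented):

    fossil clean --dotfiles --keep '*.local' --clean '*.o'

Here *.o files are deleted without prompting or undo, *.local files are
never touched, and every other extra file is backed up to the undo buffer
before removal.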

Changes to src/db.c.

**    case-sensitive   If TRUE, the files whose names differ only in case
**                     are considered distinct.  If FALSE files whose names
**                     differ only in case are the same file.  Defaults to
**                     TRUE for unix and FALSE for Cygwin, Mac and Windows.
**
**    clean-glob       The VALUE is a comma or newline-separated list of GLOB
**     (versionable)   patterns specifying files that the "clean" command will
**                     delete without prompting even when the -force flag has
**                     not been used.  Example:  *.a *.lib *.o
**
**    clearsign        When enabled, fossil will attempt to sign all commits
**                     with gpg.  When disabled (the default), commits will
**                     be unsigned.  Default: off
**
**    crnl-glob        A comma or newline-separated list of GLOB patterns for
**     (versionable)   text files in which it is ok to have CR, CR+NL or mixed
**    case-sensitive   If TRUE, the files whose names differ only in case
**                     are considered distinct.  If FALSE files whose names
**                     differ only in case are the same file.  Defaults to
**                     TRUE for unix and FALSE for Cygwin, Mac and Windows.
**
**    clean-glob       The VALUE is a comma or newline-separated list of GLOB
**     (versionable)   patterns specifying files that the "clean" command will
**                     delete without prompting or allowing undo.
**                     Example: *.a,*.lib,*.o
**
**    clearsign        When enabled, fossil will attempt to sign all commits
**                     with gpg.  When disabled (the default), commits will
**                     be unsigned.  Default: off
**
**    crnl-glob        A comma or newline-separated list of GLOB patterns for
**     (versionable)   text files in which it is ok to have CR, CR+NL or mixed
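
A repository can adopt the documented example value directly; a hypothetical
command (the glob list is only illustrative):

    fossil settings clean-glob '*.a,*.lib,*.o'

Because the setting is versionable, the same value may instead be checked in
as the file .fossil-settings/clean-glob.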

Changes to src/event.c.

  @ </td></tr></table>
  @ </div></form>
  style_footer();
}

/*
** Add a new tech note to the repository.  The timestamp is
** given by the zETime parameter.  isNew must be true to create
** a new page.  If no previous page with the name zPageName exists
** and isNew is false, then this routine throws an error.
*/
void event_cmd_commit(
  char *zETime,             /* timestamp */
  int isNew,                /* true to create a new page */
  Blob *pContent,           /* content of the new page */
  const char *zMimeType,    /* mimetype of the content */
  const char *zComment,     /* comment to go on the timeline */
  const char *zTags,        /* tags */
  const char *zClr          /* background color */
){
  int rid;                /* Artifact id of the tech note */
  const char *zId;        /* id of the tech note */
  rid = db_int(0, "SELECT objid FROM event"
        " WHERE datetime(mtime)=datetime('%q') AND type = 'e'"
        " LIMIT 1",
        zETime
  );
  if( rid==0 && !isNew ){
#ifdef FOSSIL_ENABLE_JSON
    g.json.resultCode = FSL_JSON_E_RESOURCE_NOT_FOUND;
#endif
    fossil_fatal("no such tech note: %s", zETime);
  }
  if( rid!=0 && isNew ){
#ifdef FOSSIL_ENABLE_JSON
    g.json.resultCode = FSL_JSON_E_RESOURCE_ALREADY_EXISTS;
#endif
    fossil_fatal("tech note %s already exists", zETime);
  }

  if ( isNew ){
    zId = db_text(0, "SELECT lower(hex(randomblob(20)))");
  }else{
    zId = db_text(0,
      "SELECT substr(tagname,7) FROM tag"
      " WHERE tagid=(SELECT tagid FROM event WHERE objid='%d')",
      rid
    );
  @ </td></tr></table>
  @ </div></form>
  style_footer();
}

/*
** Add a new tech note to the repository.  The timestamp is
** given by the zETime parameter.  rid must be zero to create
** a new tech note.  If rid is nonzero, the existing tech note with
** that artifact id is updated instead.
*/
void event_cmd_commit(
  char *zETime,             /* timestamp */
  int   rid,                /* Artifact id of the tech note */
  Blob *pContent,           /* content of the new page */
  const char *zMimeType,    /* mimetype of the content */
  const char *zComment,     /* comment to go on the timeline */
  const char *zTags,        /* tags */
  const char *zClr          /* background color */
){
  const char *zId;        /* id of the tech note */

  if ( rid==0 ){
    zId = db_text(0, "SELECT lower(hex(randomblob(20)))");
  }else{
    zId = db_text(0,
      "SELECT substr(tagname,7) FROM tag"
      " WHERE tagid=(SELECT tagid FROM event WHERE objid='%d')",
      rid
    );

Changes to src/export.c.

** This file contains code used to export the content of a Fossil
** repository in the git-fast-import format.
*/
#include "config.h"
#include "export.h"
#include <assert.h>
/*
** Output a "committer" record for the given user.
*/
static void print_person(const char *zUser){
  static Stmt q;
  const char *zContact;
  char *zName;
................................................................................
  free(zName);
  free(zEmail);
  db_reset(&q);
}

#define BLOBMARK(rid)   ((rid) * 2)
#define COMMITMARK(rid) ((rid) * 2 + 1)
/*
** COMMAND: export
**
** Usage: %fossil export --git ?OPTIONS? ?REPOSITORY?
**
** Write an export of all check-ins to standard output.  The export is
................................................................................

  db_find_and_open_repository(0, 2);
  verify_all_options();
  if( g.argc!=2 && g.argc!=3 ){ usage("--git ?REPOSITORY?"); }

  db_multi_exec("CREATE TEMPORARY TABLE oldblob(rid INTEGER PRIMARY KEY)");
  db_multi_exec("CREATE TEMPORARY TABLE oldcommit(rid INTEGER PRIMARY KEY)");

  if( markfile_in!=0 ){
    Stmt qb,qc;
    char line[100];
    FILE *f;

    f = fossil_fopen(markfile_in, "r");
    if( f==0 ){
      fossil_fatal("cannot open %s for reading", markfile_in);
    }
    db_prepare(&qb, "INSERT OR IGNORE INTO oldblob VALUES (:rid)");
    db_prepare(&qc, "INSERT OR IGNORE INTO oldcommit VALUES (:rid)");
    while( fgets(line, sizeof(line), f)!=0 ){
      if( *line == 'b' ){
        db_bind_text(&qb, ":rid", line + 1);
        db_step(&qb);
        db_reset(&qb);
        bag_insert(&blobs, atoi(line + 1));
      }else if( *line == 'c' ){
        db_bind_text(&qc, ":rid", line + 1);
        db_step(&qc);
        db_reset(&qc);
        bag_insert(&vers, atoi(line + 1));
      }else{
        fossil_fatal("bad input from %s: %s", markfile_in, line);
      }
    }
    db_finalize(&qb);
    db_finalize(&qc);
    fclose(f);
  }

  /* Step 1:  Generate "blob" records for every artifact that is part
................................................................................
    Stmt q4;
    const char *zSecondsSince1970 = db_column_text(&q, 0);
    int ckinId = db_column_int(&q, 1);
    const char *zComment = db_column_text(&q, 2);
    const char *zUser = db_column_text(&q, 3);
    const char *zBranch = db_column_text(&q, 4);
    char *zBr;

    bag_insert(&vers, ckinId);
    db_bind_int(&q2, ":rid", ckinId);
    db_step(&q2);
    db_reset(&q2);
    if( zBranch==0 ) zBranch = "trunk";
    zBr = mprintf("%s", zBranch);
    for(i=0; zBr[i]; i++){
      if( !fossil_isalnum(zBr[i]) ) zBr[i] = '_';
    }
    printf("commit refs/heads/%s\nmark :%d\n", zBr, COMMITMARK(ckinId));
    free(zBr);
    printf("committer");
    print_person(zUser);
    printf(" %s +0000\n", zSecondsSince1970);
    if( zComment==0 ) zComment = "null comment";
    printf("data %d\n%s\n", (int)strlen(zComment), zComment);
    db_prepare(&q3,
      "SELECT pid FROM plink"
      " WHERE cid=%d AND isprim"
      "   AND pid IN (SELECT objid FROM event)",
      ckinId
    );
    if( db_step(&q3) == SQLITE_ROW ){
      printf("from :%d\n", COMMITMARK(db_column_int(&q3, 0)));



      db_prepare(&q4,
        "SELECT pid FROM plink"
        " WHERE cid=%d AND NOT isprim"
        "   AND NOT EXISTS(SELECT 1 FROM phantom WHERE rid=pid)"
        " ORDER BY pid",
        ckinId);
      while( db_step(&q4)==SQLITE_ROW ){
        printf("merge :%d\n", COMMITMARK(db_column_int(&q4,0)));


      }
      db_finalize(&q4);
    }else{
      printf("deleteall\n");
    }

    db_prepare(&q4,
................................................................................
    }
    db_finalize(&q4);
    db_finalize(&q3);
    printf("\n");
  }
  db_finalize(&q2);
  db_finalize(&q);
  bag_clear(&blobs);
  manifest_cache_clear();


  /* Output tags */
  db_prepare(&q,
     "SELECT tagname, rid, strftime('%%s',mtime)"
     "  FROM tagxref JOIN tag USING(tagid)"
................................................................................
    printf("tag %s\n", zEncoded);
    printf("from :%d\n", COMMITMARK(rid));
    printf("tagger <tagger> %s +0000\n", zSecSince1970);
    printf("data 0\n");
    fossil_free(zEncoded);
  }
  db_finalize(&q);
  bag_clear(&vers);

  if( markfile_out!=0 ){
    FILE *f;
    f = fossil_fopen(markfile_out, "w");
    if( f == 0 ){
      fossil_fatal("cannot open %s for writing", markfile_out);
    }
    db_prepare(&q, "SELECT rid FROM oldblob");
    while( db_step(&q)==SQLITE_ROW ){
      fprintf(f, "b%d\n", db_column_int(&q, 0));
    }
    db_finalize(&q);
    db_prepare(&q, "SELECT rid FROM oldcommit");
    while( db_step(&q)==SQLITE_ROW ){
      fprintf(f, "c%d\n", db_column_int(&q, 0));
    }
    db_finalize(&q);
    if( ferror(f)!=0 || fclose(f)!=0 ) {
      fossil_fatal("error while writing %s", markfile_out);
    }
  }
}
** This file contains code used to export the content of a Fossil
** repository in the git-fast-import format.
*/
#include "config.h"
#include "export.h"
#include <assert.h>

#if INTERFACE
/*
** struct mark_t
**   holds information for translating between git commits
**   and fossil commits.
**   -name: This is the mark name that identifies the commit to git.
**          It will always begin with a ':'.
**   -rid: The unique object ID that identifies this commit within the
**         repository database.
**   -uuid: The SHA-1 of the artifact corresponding to rid.
*/
struct mark_t{
  char *name;
  int rid;
  char uuid[41];
};
#endif

/*
** Output a "committer" record for the given user.
*/
static void print_person(const char *zUser){
  static Stmt q;
  const char *zContact;
  char *zName;
................................................................................
  free(zName);
  free(zEmail);
  db_reset(&q);
}

#define BLOBMARK(rid)   ((rid) * 2)
#define COMMITMARK(rid) ((rid) * 2 + 1)

/*
** insert_commit_xref()
**   Insert a new (mark,rid,uuid) entry into the 'xmark' table.
**   zName and zUuid must be non-null and must point to NULL-terminated strings.
*/
void insert_commit_xref(int rid, const char *zName, const char *zUuid){
  db_multi_exec(
    "INSERT OR IGNORE INTO xmark(tname, trid, tuuid)"
    "VALUES(%Q,%d,%Q)",
    zName, rid, zUuid
  );
}

/*
** create_mark()
**   Create a new (mark,rid,uuid) entry for the given rid in the 'xmark' table,
**   and return that information as a struct mark_t in *mark.
**   This function returns -1 in the case where 'rid' does not exist, otherwise
**   it returns 0.
**   mark->name is dynamically allocated and is owned by the caller upon return.
*/
int create_mark(int rid, struct mark_t *mark){
  char sid[13];
  char *zUuid = rid_to_uuid(rid);
  if(!zUuid){
    fossil_trace("Undefined rid=%d\n", rid);
    return -1;
  }
  mark->rid = rid;
  sprintf(sid, ":%d", COMMITMARK(rid));
  mark->name = fossil_strdup(sid);
  strcpy(mark->uuid, zUuid);
  free(zUuid);
  insert_commit_xref(mark->rid, mark->name, mark->uuid);
  return 0;
}

/*
** mark_name_from_rid()
**   Find the mark associated with the given rid.  Mark names always start
**   with ':', and are pulled from the 'xmark' temporary table.
**   This function returns NULL if the rid does not exist in the 'xmark' table.
**   Otherwise, it returns the name of the mark, which is dynamically allocated
**   and is owned by the caller of this function.
*/
char * mark_name_from_rid(int rid){
  char *zMark = db_text(0, "SELECT tname FROM xmark WHERE trid=%d", rid);
  if(zMark==NULL){
    struct mark_t mark;
    if(create_mark(rid, &mark)==0){
      zMark = mark.name;
    }else{
      return NULL;
    }
  }
  return zMark;
}

/*
** parse_mark()
**   Create a new (mark,rid,uuid) entry in the 'xmark' table given a line
**   from a marks file.  Return the cross-ref information as a struct mark_t
**   in *mark.
**   This function returns -1 in the case that the line is blank, malformed, or
**   the rid/uuid named in 'line' does not match what is in the repository
**   database.  Otherwise, 0 is returned.
**   mark->name is dynamically allocated, and owned by the caller.
*/
int parse_mark(char *line, struct mark_t *mark){
  char *cur_tok;
  cur_tok = strtok(line, " \t");
  if(!cur_tok||strlen(cur_tok)<2){
    return -1;
  }
  mark->rid = atoi(&cur_tok[1]);
  if(cur_tok[0]!='c'){
    /* This is probably a blob mark */
    mark->name = NULL;
    return 0;
  }

  cur_tok = strtok(NULL, " \t");
  if(!cur_tok){
    /* This mark was generated by an older version of Fossil and doesn't
    ** include the mark name and uuid.  create_mark() will name the new mark
    ** exactly as it was when exported to git, so that we should have a
    ** valid mapping from git sha1<->mark name<->fossil sha1. */
    return create_mark(mark->rid, mark);
  }else{
    mark->name = fossil_strdup(cur_tok);
  }

  cur_tok = strtok(NULL, "\n");
  if(!cur_tok||strlen(cur_tok)!=40){
    free(mark->name);
    fossil_trace("Invalid SHA-1 in marks file: %s\n", cur_tok);
    return -1;
  }else{
    strcpy(mark->uuid, cur_tok);
  }

  /* make sure that rid corresponds to UUID */
  if(fast_uuid_to_rid(mark->uuid)!=mark->rid){
    free(mark->name);
    fossil_trace("Non-existent SHA-1 in marks file: %s\n", mark->uuid);
    return -1;
  }

  /* insert a cross-ref into the 'xmark' table */
  insert_commit_xref(mark->rid, mark->name, mark->uuid);
  return 0;
}

/*
** import_marks()
**   Import the marks specified in file 'f' into the 'xmark' table.
**   If 'blobs' is non-null, insert all blob marks into it.
**   If 'vers' is non-null, insert all commit marks into it.
**   Each line in the file must be at most 100 characters in length.  This
**   seems like a reasonable maximum for a 40-character uuid, and 1-13
**   character rid.
**   The function returns -1 if any of the lines in file 'f' are malformed,
**   or the rid/uuid information doesn't match what is in the repository
**   database.  Otherwise, 0 is returned.
*/
int import_marks(FILE* f, Bag *blobs, Bag *vers){
  char line[101];
  while(fgets(line, sizeof(line), f)){
    struct mark_t mark;
    if(strlen(line)==100&&line[99]!='\n'){
      /* line too long */
      return -1;
    }
    if( parse_mark(line, &mark)<0 ){
      return -1;
    }else if( line[0]=='b' ){
      /* Don't import blob marks into 'xmark' table--git doesn't use them,
      ** so they need to be left free for git to reuse. */
      if(blobs!=NULL){
        bag_insert(blobs, mark.rid);
      }
    }else if( vers!=NULL ){
      bag_insert(vers, mark.rid);
    }
    free(mark.name);
  }
  return 0;
}

/*
**  If 'blobs' is non-null, it must point to a Bag of blob rids to be
**  written to disk.  Blob rids are written as 'b<rid>'.
**  If 'vers' is non-null, it must point to a Bag of commit rids to be
**  written to disk.  Commit rids are written as 'c<rid> :<mark> <uuid>'.
**  All commit (mark,rid,uuid) tuples are stored in the 'xmark' table.
**  This function does not fail, but may produce errors if a uuid cannot
**  be found for an rid in 'vers'.
*/
void export_marks(FILE* f, Bag *blobs, Bag *vers){
  int rid;
  if( blobs!=NULL ){
    rid = bag_first(blobs);
    if(rid!=0){
      do{
        fprintf(f, "b%d\n", rid);
      }while((rid = bag_next(blobs, rid))!=0);
    }
  }
  if( vers!=NULL ){
    rid = bag_first(vers);
    if( rid!=0 ){
      do{
        char *zUuid = rid_to_uuid(rid);
        char *zMark;
        if(zUuid==NULL){
          fossil_trace("No uuid matching rid=%d when exporting marks\n", rid);
          continue;
        }
        zMark = mark_name_from_rid(rid);
        fprintf(f, "c%d %s %s\n", rid, zMark, zUuid);
        free(zMark);
        free(zUuid);
      }while( (rid = bag_next(vers, rid))!=0 );
    }
  }
}
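
/*
** Taken together, import_marks() and export_marks() round-trip a marks file
** whose records interleave blobs and commits.  A hypothetical three-record
** example (rids, mark number, and SHA-1 invented; note that mark :23 is
** COMMITMARK(11), i.e. 11*2+1):
**
**   b10
**   b12
**   c11 :23 0123456789abcdef0123456789abcdef01234567
*/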

/*
** COMMAND: export
**
** Usage: %fossil export --git ?OPTIONS? ?REPOSITORY?
**
** Write an export of all check-ins to standard output.  The export is
................................................................................

  db_find_and_open_repository(0, 2);
  verify_all_options();
  if( g.argc!=2 && g.argc!=3 ){ usage("--git ?REPOSITORY?"); }

  db_multi_exec("CREATE TEMPORARY TABLE oldblob(rid INTEGER PRIMARY KEY)");
  db_multi_exec("CREATE TEMPORARY TABLE oldcommit(rid INTEGER PRIMARY KEY)");
  db_multi_exec("CREATE TEMP TABLE xmark(tname TEXT UNIQUE, trid INT, tuuid TEXT)");
  if( markfile_in!=0 ){
    Stmt qb,qc;
    FILE *f;
    int rid;

    f = fossil_fopen(markfile_in, "r");
    if( f==0 ){
      fossil_fatal("cannot open %s for reading", markfile_in);
    }
    if(import_marks(f, &blobs, &vers)<0){
      fossil_fatal("error importing marks from file: %s\n", markfile_in);
    }
    db_prepare(&qb, "INSERT OR IGNORE INTO oldblob VALUES (:rid)");
    db_prepare(&qc, "INSERT OR IGNORE INTO oldcommit VALUES (:rid)");
    rid = bag_first(&blobs);
    if(rid!=0){
      do{
        db_bind_int(&qb, ":rid", rid);
        db_step(&qb);
        db_reset(&qb);
      }while((rid = bag_next(&blobs, rid))!=0);
    }
    rid = bag_first(&vers);
    if(rid!=0){
      do{
        db_bind_int(&qc, ":rid", rid);
        db_step(&qc);
        db_reset(&qc);
      }while((rid = bag_next(&vers, rid))!=0);
    }
    db_finalize(&qb);
    db_finalize(&qc);
    fclose(f);
  }

  /* Step 1:  Generate "blob" records for every artifact that is part
................................................................................
    Stmt q4;
    const char *zSecondsSince1970 = db_column_text(&q, 0);
    int ckinId = db_column_int(&q, 1);
    const char *zComment = db_column_text(&q, 2);
    const char *zUser = db_column_text(&q, 3);
    const char *zBranch = db_column_text(&q, 4);
    char *zBr;
    char *zMark;

    bag_insert(&vers, ckinId);
    db_bind_int(&q2, ":rid", ckinId);
    db_step(&q2);
    db_reset(&q2);
    if( zBranch==0 ) zBranch = "trunk";
    zBr = mprintf("%s", zBranch);
    for(i=0; zBr[i]; i++){
      if( !fossil_isalnum(zBr[i]) ) zBr[i] = '_';
    }
    zMark = mark_name_from_rid(ckinId);
    printf("commit refs/heads/%s\nmark %s\n", zBr, zMark);
    free(zMark);
    free(zBr);
    printf("committer");
    print_person(zUser);
    printf(" %s +0000\n", zSecondsSince1970);
    if( zComment==0 ) zComment = "null comment";
    printf("data %d\n%s\n", (int)strlen(zComment), zComment);
    db_prepare(&q3,
      "SELECT pid FROM plink"
      " WHERE cid=%d AND isprim"
      "   AND pid IN (SELECT objid FROM event)",
      ckinId
    );
    if( db_step(&q3) == SQLITE_ROW ){
      int pid = db_column_int(&q3, 0);
      zMark = mark_name_from_rid(pid);
      printf("from %s\n", zMark);
      free(zMark);
      db_prepare(&q4,
        "SELECT pid FROM plink"
        " WHERE cid=%d AND NOT isprim"
        "   AND NOT EXISTS(SELECT 1 FROM phantom WHERE rid=pid)"
        " ORDER BY pid",
        ckinId);
      while( db_step(&q4)==SQLITE_ROW ){
        zMark = mark_name_from_rid(db_column_int(&q4, 0));
        printf("merge %s\n", zMark);
        free(zMark);
      }
      db_finalize(&q4);
    }else{
      printf("deleteall\n");
    }

    db_prepare(&q4,
................................................................................
    }
    db_finalize(&q4);
    db_finalize(&q3);
    printf("\n");
  }
  db_finalize(&q2);
  db_finalize(&q);
  manifest_cache_clear();


  /* Output tags */
  db_prepare(&q,
     "SELECT tagname, rid, strftime('%%s',mtime)"
     "  FROM tagxref JOIN tag USING(tagid)"
................................................................................
    printf("tag %s\n", zEncoded);
    printf("from :%d\n", COMMITMARK(rid));
    printf("tagger <tagger> %s +0000\n", zSecSince1970);
    printf("data 0\n");
    fossil_free(zEncoded);
  }
  db_finalize(&q);

  if( markfile_out!=0 ){
    FILE *f;
    f = fossil_fopen(markfile_out, "w");
    if( f == 0 ){
      fossil_fatal("cannot open %s for writing", markfile_out);
    }
    export_marks(f, &blobs, &vers);
    if( ferror(f)!=0 || fclose(f)!=0 ) {
      fossil_fatal("error while writing %s", markfile_out);
    }
  }
  bag_clear(&blobs);
  bag_clear(&vers);
}

Changes to src/foci.c.

**
** Author contact information:
**   drh@hwaci.com
**   http://www.hwaci.com/drh/
**
*******************************************************************************
**
** This routine implements an SQLite virtual table that gives all of the
** files associated with a single check-in.
**
** The filename "foci" is short for "Files of Check-in".
**
** Usage example:
**
**    CREATE VIRTUAL TABLE temp.foci USING files_of_checkin;
**                      -- ^^^^--- important!
**    SELECT * FROM foci WHERE checkinID=symbolic_name_to_rid('trunk');
**
** The symbolic_name_to_rid('trunk') function finds the BLOB.RID value
** corresponding to the 'trunk' tag.  Then the files_of_checkin virtual table
** decodes the manifest defined by that BLOB and returns all files described
** by that manifest.  The "schema" for the temp.foci table is:
**
**     CREATE TABLE files_of_checkin(
**       checkinID    INTEGER,    -- RID for the check-in manifest
**       filename     TEXT,       -- Name of a file
**       uuid         TEXT,       -- SHA1 hash of the file
**
** Author contact information:
**   drh@hwaci.com
**   http://www.hwaci.com/drh/
**
*******************************************************************************
**
** This routine implements an eponymous virtual table for SQLite that gives
** all of the files associated with a single check-in.
**
** The source code filename "foci" is short for "Files of Check-in".
**
** Usage example:
**
**    SELECT * FROM files_of_checkin
**     WHERE checkinID=symbolic_name_to_rid('trunk');
**
** The symbolic_name_to_rid('trunk') function finds the BLOB.RID value
** corresponding to the 'trunk' tag.  Then the foci virtual table
** decodes the manifest defined by that BLOB and returns all files described
** by that manifest.  The "schema" for the temp.foci table is:
**
**     CREATE TABLE files_of_checkin(
**       checkinID    INTEGER,    -- RID for the check-in manifest
**       filename     TEXT,       -- Name of a file
**       uuid         TEXT,       -- SHA1 hash of the file

Changes to src/import.c.

** construct a new Fossil repository named by the NEW-REPOSITORY
** argument.  If no input file is supplied the interchange format
** data is read from standard input.
**
** The following formats are currently understood by this command
**
**   --git        Import from the git-fast-export file format (default)
**
**   --svn        Import from the svnadmin-dump file format.  The default
**                behaviour (unless overridden by --flat) is to treat 3
**                folders in the SVN root as special, following the
**                common layout of SVN repositories.  These are (by
**                default) trunk/, branches/ and tags/.  The SVN --deltas
**                format is supported but not required.
................................................................................
*/
void import_cmd(void){
  char *zPassword;
  FILE *pIn;
  Stmt q;
  int forceFlag = find_option("force", "f", 0)!=0;
  int svnFlag = find_option("svn", 0, 0)!=0;
  int omitRebuild = find_option("no-rebuild",0,0)!=0;
  int omitVacuum = find_option("no-vacuum",0,0)!=0;

  /* Options common to all input formats */
  int incrFlag = find_option("incremental", "i", 0)!=0;

  /* Options for --svn only */
  const char *zBase="";
  int flatFlag=0;

  /* Interpret --rename-* options.  Use a table to avoid code duplication. */
  const struct {
    const char *zOpt, **varPre, *zDefaultPre, **varSuf, *zDefaultSuf;
    int format; /* 1=git, 2=svn, 3=any */
  } renOpts[] = {
    {"rename-branch", &gimport.zBranchPre,   "", &gimport.zBranchSuf, "", 3},
................................................................................
    zBase = find_option("base", 0, 1);
    flatFlag = find_option("flat", 0, 0)!=0;
    gsvn.zTrunk = find_option("trunk", 0, 1);
    gsvn.zBranches = find_option("branches", 0, 1);
    gsvn.zTags = find_option("tags", 0, 1);
    gsvn.revFlag = find_option("rev-tags", 0, 0)
                || (incrFlag && !find_option("no-rev-tags", 0, 0));
  }else{
    find_option("git",0,0);  /* Skip the --git option for now */
  }
  verify_all_options();

  if( g.argc!=3 && g.argc!=4 ){
    usage("--git|--svn ?OPTIONS? NEW-REPOSITORY ?INPUT-FILE?");
  }
  if( g.argc==4 ){
................................................................................
      if( gsvn.zTags[gsvn.lenTags-1]!='/' ){
        gsvn.zTags = mprintf("%s/", gsvn.zTags);
        gsvn.lenTags++;
      }
    }
    svn_dump_import(pIn);
  }else{
    /* The following temp-tables are used to hold information needed for
    ** the import.
    **
    ** The XMARK table provides a mapping from fast-import "marks" and symbols
    ** into artifact ids (UUIDs - the 40-byte hex SHA1 hash of artifacts).
    ** Given any valid fast-import symbol, the corresponding fossil rid and
    ** uuid can be found by searching against the xmark.tname field.
................................................................................
    ** occurrence of the tag is the last until the import finishes.
    */
    db_multi_exec(
       "CREATE TEMP TABLE xmark(tname TEXT UNIQUE, trid INT, tuuid TEXT);"
       "CREATE TEMP TABLE xbranch(tname TEXT UNIQUE, brnm TEXT);"
       "CREATE TEMP TABLE xtag(tname TEXT UNIQUE, tcontent TEXT);"
    );
    manifest_crosslink_begin();
    git_fast_import(pIn);
    db_prepare(&q, "SELECT tcontent FROM xtag");
    while( db_step(&q)==SQLITE_ROW ){
      Blob record;
      db_ephemeral_blob(&q, 0, &record);
      fast_insert_content(&record, 0, 0, 1);
      import_reset(0);
    }
    db_finalize(&q);
    manifest_crosslink_end(MC_NONE);
  }

  verify_cancel();
  db_end_transaction(0);
  fossil_print("                               \r");
  if( omitRebuild ){
** construct a new Fossil repository named by the NEW-REPOSITORY
** argument.  If no input file is supplied the interchange format
** data is read from standard input.
**
** The following formats are currently understood by this command
**
**   --git        Import from the git-fast-export file format (default)
**                Options:
**                  --import-marks FILE Restore marks table from FILE
**                  --export-marks FILE Save marks table to FILE
**
**   --svn        Import from the svnadmin-dump file format.  The default
**                behaviour (unless overridden by --flat) is to treat 3
**                folders in the SVN root as special, following the
**                common layout of SVN repositories.  These are (by
**                default) trunk/, branches/ and tags/.  The SVN --deltas
**                format is supported but not required.
................................................................................
*/
void import_cmd(void){
  char *zPassword;
  FILE *pIn;
  Stmt q;
  int forceFlag = find_option("force", "f", 0)!=0;
  int svnFlag = find_option("svn", 0, 0)!=0;
  int gitFlag = find_option("git", 0, 0)!=0;
  int omitRebuild = find_option("no-rebuild",0,0)!=0;
  int omitVacuum = find_option("no-vacuum",0,0)!=0;

  /* Options common to all input formats */
  int incrFlag = find_option("incremental", "i", 0)!=0;

  /* Options for --svn only */
  const char *zBase="";
  int flatFlag=0;

  /* Options for --git only */
  const char *markfile_in = 0;   /* initialized so the tests below are */
  const char *markfile_out = 0;  /* well-defined when --git is omitted */

  /* Interpret --rename-* options.  Use a table to avoid code duplication. */
  const struct {
    const char *zOpt, **varPre, *zDefaultPre, **varSuf, *zDefaultSuf;
    int format; /* 1=git, 2=svn, 3=any */
  } renOpts[] = {
    {"rename-branch", &gimport.zBranchPre,   "", &gimport.zBranchSuf, "", 3},
................................................................................
    zBase = find_option("base", 0, 1);
    flatFlag = find_option("flat", 0, 0)!=0;
    gsvn.zTrunk = find_option("trunk", 0, 1);
    gsvn.zBranches = find_option("branches", 0, 1);
    gsvn.zTags = find_option("tags", 0, 1);
    gsvn.revFlag = find_option("rev-tags", 0, 0)
                || (incrFlag && !find_option("no-rev-tags", 0, 0));
  }else if( gitFlag ){
    markfile_in = find_option("import-marks", 0, 1);
    markfile_out = find_option("export-marks", 0, 1);
  }
  verify_all_options();

  if( g.argc!=3 && g.argc!=4 ){
    usage("--git|--svn ?OPTIONS? NEW-REPOSITORY ?INPUT-FILE?");
  }
  if( g.argc==4 ){
................................................................................
      if( gsvn.zTags[gsvn.lenTags-1]!='/' ){
        gsvn.zTags = mprintf("%s/", gsvn.zTags);
        gsvn.lenTags++;
      }
    }
    svn_dump_import(pIn);
  }else{
    Bag blobs, vers;
    bag_init(&blobs);
    bag_init(&vers);
    /* The following temp-tables are used to hold information needed for
    ** the import.
    **
    ** The XMARK table provides a mapping from fast-import "marks" and symbols
    ** into artifact ids (UUIDs - the 40-character hex SHA1 hash of artifacts).
    ** Given any valid fast-import symbol, the corresponding fossil rid and
    ** uuid can be found by searching against the xmark.tname field.
................................................................................
    ** occurrence of the tag is the last until the import finishes.
    */
    db_multi_exec(
       "CREATE TEMP TABLE xmark(tname TEXT UNIQUE, trid INT, tuuid TEXT);"
       "CREATE TEMP TABLE xbranch(tname TEXT UNIQUE, brnm TEXT);"
       "CREATE TEMP TABLE xtag(tname TEXT UNIQUE, tcontent TEXT);"
    );
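
    /* Reviewer sketch (not part of this check-in): once XMARK is
    ** populated, a fast-import mark resolves to its artifact in a single
    ** lookup, e.g.
    **
    **   char *zUuid = db_text(0,
    **       "SELECT tuuid FROM xmark WHERE tname=%Q", zMark);
    */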

    if(markfile_in){
      FILE *f = fossil_fopen(markfile_in, "r");
      if(!f){
        fossil_fatal("cannot open %s for reading\n", markfile_in);
      }
      if(import_marks(f, &blobs, NULL)<0){
        fossil_fatal("error importing marks from file: %s\n", markfile_in);
      }
      fclose(f);
    }

    manifest_crosslink_begin();
    git_fast_import(pIn);
    db_prepare(&q, "SELECT tcontent FROM xtag");
    while( db_step(&q)==SQLITE_ROW ){
      Blob record;
      db_ephemeral_blob(&q, 0, &record);
      fast_insert_content(&record, 0, 0, 1);
      import_reset(0);
    }
    db_finalize(&q);
    if(markfile_out){
      int rid;
      Stmt q_marks;
      FILE *f;
      db_prepare(&q_marks, "SELECT DISTINCT trid FROM xmark");
      while( db_step(&q_marks)==SQLITE_ROW){
        rid = db_column_int(&q_marks, 0);
        if(db_int(0, "SELECT count(objid) FROM event WHERE objid=%d AND type='ci'", rid)==0){
          if(bag_find(&blobs, rid)==0){
            bag_insert(&blobs, rid);
          }
        }else{
          bag_insert(&vers, rid);
        }
      }
      db_finalize(&q_marks);
      f = fossil_fopen(markfile_out, "w");
      if(!f){
        fossil_fatal("cannot open %s for writing\n", markfile_out);
      }
      export_marks(f, &blobs, &vers);
      fclose(f);
      bag_clear(&blobs);
      bag_clear(&vers);
    }
    manifest_crosslink_end(MC_NONE);
  }

  verify_cancel();
  db_end_transaction(0);
  fossil_print("                               \r");
  if( omitRebuild ){

Changes to src/info.c.

    );
    while( db_step(&q)==SQLITE_ROW ){
      const char *zDate = db_column_text(&q, 0);
      const char *zUser = db_column_text(&q, 1);
      const char *zCom = db_column_text(&q, 2);
      const char *zType = db_column_text(&q, 3);
      const char *zUuid = db_column_text(&q, 4);

      if( cnt>0 ){
        @ Also
      }
      if( zType[0]=='w' ){
        @ Wiki edit
        objType |= OBJTYPE_WIKI;
      }else if( zType[0]=='t' ){
        @ Ticket change
        objType |= OBJTYPE_TICKET;
      }else if( zType[0]=='c' ){
        @ Manifest of check-in
        objType |= OBJTYPE_CHECKIN;
      }else if( zType[0]=='e' ){

        @ Instance of technote
        objType |= OBJTYPE_EVENT;
        hyperlink_to_event_tagid(db_column_int(&q, 5));
      }else{
        @ Tag referencing
      }
      if( zType[0]!='e' ){
        hyperlink_to_uuid(zUuid);
      }
      @ - %!W(zCom) by
      hyperlink_to_user(zUser,zDate," on");
      hyperlink_to_date(zDate, ".");
      if( pDownloadName && blob_size(pDownloadName)==0 ){
        blob_appendf(pDownloadName, "%S.txt", zUuid);
................................................................................
    if( cnt>0 ){
      @ Also attachment "%h(zFilename)" to
    }else{
      @ Attachment "%h(zFilename)" to
    }
    objType |= OBJTYPE_ATTACHMENT;
    if( strlen(zTarget)==UUID_SIZE && validate16(zTarget,UUID_SIZE) ){
      if( g.perm.Hyperlink && g.anon.RdTkt ){
        @ ticket [%z(href("%R/tktview?name=%!S",zTarget))%S(zTarget)</a>]
      }else{
        @ ticket [%S(zTarget)]
      }
    }else{
      if( g.perm.Hyperlink && g.anon.RdWiki ){
        @ wiki page [%z(href("%R/wiki?name=%t",zTarget))%h(zTarget)</a>]
      }else{
        @ wiki page [%h(zTarget)]
      }

................................................................................

    );
    while( db_step(&q)==SQLITE_ROW ){
      const char *zDate = db_column_text(&q, 0);
      const char *zUser = db_column_text(&q, 1);
      const char *zCom = db_column_text(&q, 2);
      const char *zType = db_column_text(&q, 3);
      const char *zUuid = db_column_text(&q, 4);
      int eventTagId = db_column_int(&q, 5);
      if( cnt>0 ){
        @ Also
      }
      if( zType[0]=='w' ){
        @ Wiki edit
        objType |= OBJTYPE_WIKI;
      }else if( zType[0]=='t' ){
        @ Ticket change
        objType |= OBJTYPE_TICKET;
      }else if( zType[0]=='c' ){
        @ Manifest of check-in
        objType |= OBJTYPE_CHECKIN;
      }else if( zType[0]=='e' ){
        if( eventTagId != 0) {
          @ Instance of technote
          objType |= OBJTYPE_EVENT;
          hyperlink_to_event_tagid(db_column_int(&q, 5));
        }else{
          @ Attachment to technote 
        }
      }else{
        @ Tag referencing
      }
      if( zType[0]!='e' || eventTagId == 0){
        hyperlink_to_uuid(zUuid);
      }
      @ - %!W(zCom) by
      hyperlink_to_user(zUser,zDate," on");
      hyperlink_to_date(zDate, ".");
      if( pDownloadName && blob_size(pDownloadName)==0 ){
        blob_appendf(pDownloadName, "%S.txt", zUuid);
................................................................................
    if( cnt>0 ){
      @ Also attachment "%h(zFilename)" to
    }else{
      @ Attachment "%h(zFilename)" to
    }
    objType |= OBJTYPE_ATTACHMENT;
    if( strlen(zTarget)==UUID_SIZE && validate16(zTarget,UUID_SIZE) ){
      if ( db_exists("SELECT 1 FROM tag WHERE tagname='tkt-%q'",
            zTarget)
      ){
        if( g.perm.Hyperlink && g.anon.RdTkt ){
          @ ticket [%z(href("%R/tktview?name=%!S",zTarget))%S(zTarget)</a>]
        }else{
          @ ticket [%S(zTarget)]
        }
      }else if( db_exists("SELECT 1 FROM tag WHERE tagname='event-%q'",
            zTarget)
      ){
        if( g.perm.Hyperlink && g.anon.RdWiki ){
          @ tech note [%z(href("%R/technote/%h",zTarget))%S(zTarget)</a>]
        }else{
          @ tech note [%S(zTarget)]
        }
      }else{
        if( g.perm.Hyperlink && g.anon.RdWiki ){
          @ wiki page [%z(href("%R/wiki?name=%t",zTarget))%h(zTarget)</a>]
        }else{
          @ wiki page [%h(zTarget)]
        }
      }
    }else{
      if( g.perm.Hyperlink && g.anon.RdWiki ){
        @ wiki page [%z(href("%R/wiki?name=%t",zTarget))%h(zTarget)</a>]
      }else{
        @ wiki page [%h(zTarget)]
      }
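
The new eventTagId column above lets the /info page tell an instance of a
technote (eventTagId!=0) apart from an attachment to one, while the second
hunk classifies attachment targets by tag name.  A minimal sketch of that
classification, assuming the "tkt-UUID" and "event-UUID" tag naming the
hunk itself uses (the helper name is hypothetical):

  static const char *attach_target_type(const char *zTarget){
    if( db_exists("SELECT 1 FROM tag WHERE tagname='tkt-%q'", zTarget) ){
      return "ticket";
    }
    if( db_exists("SELECT 1 FROM tag WHERE tagname='event-%q'", zTarget) ){
      return "tech note";
    }
    return "wiki page";  /* fallback, as in the hunk above */
  }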

Changes to src/json_wiki.c.

  jstr = cson_value_get_string(contentV);
  contentLen = (int)cson_string_length_bytes(jstr);
  if(contentLen){
    blob_append(&content, cson_string_cstr(jstr),contentLen);
  }

  zMimeType = json_find_option_cstr("mimetype","mimetype","M");


  wiki_cmd_commit(zPageName, 0==rid, &content, zMimeType, 0);
  blob_reset(&content);
  /*
    Our return value here has a race condition: if this operation
    is called concurrently for the same wiki page via two requests,
    payV could reflect the results of the other save operation.
  */
  payV = json_get_wiki_page_by_name(

................................................................................

  jstr = cson_value_get_string(contentV);
  contentLen = (int)cson_string_length_bytes(jstr);
  if(contentLen){
    blob_append(&content, cson_string_cstr(jstr),contentLen);
  }

  zMimeType = json_find_option_cstr("mimetype","mimetype","M");
  zMimeType = wiki_filter_mimetypes(zMimeType);

  wiki_cmd_commit(zPageName, rid, &content, zMimeType, 0);
  blob_reset(&content);
  /*
    Our return value here has a race condition: if this operation
    is called concurrently for the same wiki page via two requests,
    payV could reflect the results of the other save operation.
  */
  payV = json_get_wiki_page_by_name(

Changes to src/manifest.c.

      const char *zTarget = db_column_text(&qatt, 2);
      const char *zName = db_column_text(&qatt, 3);
      const char isAdd = (zSrc && zSrc[0]) ? 1 : 0;
      char *zComment;
      if( isAdd ){
        zComment = mprintf(
             "Add attachment [/artifact/%!S|%h] to"
             " tech note [/technote/%h|%.10h]",
             zSrc, zName, zTarget, zTarget); 
      }else{

        zComment = mprintf("Delete attachment \"%h\" from tech note [%.10h]",

             zName, zTarget);
      }
      db_multi_exec("UPDATE event SET comment=%Q, type='e'"
                       " WHERE objid=%Q",
                    zComment, zAttachId);
      fossil_free(zComment);      
    }
    db_finalize(&qatt);
................................................................................
      }else{
        zComment = mprintf("Delete attachment \"%h\" from wiki page [%h]",
             p->zAttachName, p->zAttachTarget);
      }
    }else if( 'e' == attachToType ){
      if( isAdd ){
        zComment = mprintf(
          "Add attachment [/artifact/%!S|%h] to tech note [/technote/%h|%.10h]",
          p->zAttachSrc, p->zAttachName, p->zAttachTarget, p->zAttachTarget); 
      }else{
        zComment = mprintf("Delete attachment \"%h\" from tech note [%.10h]",


             p->zAttachName, p->zAttachTarget);

      }      
    }else{
      if( isAdd ){
        zComment = mprintf(
             "Add attachment [/artifact/%!S|%h] to ticket [%!S|%S]",
             p->zAttachSrc, p->zAttachName, p->zAttachTarget, p->zAttachTarget);
      }else{

................................................................................

      const char *zTarget = db_column_text(&qatt, 2);
      const char *zName = db_column_text(&qatt, 3);
      const char isAdd = (zSrc && zSrc[0]) ? 1 : 0;
      char *zComment;
      if( isAdd ){
        zComment = mprintf(
             "Add attachment [/artifact/%!S|%h] to"
             " tech note [/technote/%!S|%S]",
             zSrc, zName, zTarget, zTarget); 
      }else{
        zComment = mprintf(
             "Delete attachment \"%h\" from"
             " tech note [/technote/%!S|%S]",
             zName, zTarget, zTarget);
      }
      db_multi_exec("UPDATE event SET comment=%Q, type='e'"
                       " WHERE objid=%Q",
                    zComment, zAttachId);
      fossil_free(zComment);      
    }
    db_finalize(&qatt);
................................................................................
      }else{
        zComment = mprintf("Delete attachment \"%h\" from wiki page [%h]",
             p->zAttachName, p->zAttachTarget);
      }
    }else if( 'e' == attachToType ){
      if( isAdd ){
        zComment = mprintf(
          "Add attachment [/artifact/%!S|%h] to tech note [/technote/%!S|%S]",
          p->zAttachSrc, p->zAttachName, p->zAttachTarget, p->zAttachTarget); 
      }else{
        zComment = mprintf(
             "Delete attachment \"/artifact/%!S|%h\" from"
             " tech note [/technote/%!S|%S]",
             p->zAttachName, p->zAttachName,
             p->zAttachTarget,p->zAttachTarget);
      }      
    }else{
      if( isAdd ){
        zComment = mprintf(
             "Add attachment [/artifact/%!S|%h] to ticket [%!S|%S]",
             p->zAttachSrc, p->zAttachName, p->zAttachTarget, p->zAttachTarget);
      }else{
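
The rewritten comment strings above use fossil's printf extensions; usage
elsewhere in this diff suggests %!S emits the full artifact hash (suitable
inside a hyperlink target), %S the abbreviated display form, and %h an
HTML-escaped string.  A reviewer sketch with hypothetical values:

  const char *zTgt = "0123456789abcdef0123456789abcdef01234567"; /* fake */
  char *zC = mprintf(
       "Delete attachment \"%h\" from tech note [/technote/%!S|%S]",
       "diagram.png", zTgt, zTgt);
  fossil_free(zC);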

Changes to src/merge.c.

        fForkSeen = fossil_find_nearest_fork(rid, db_open_local(0))!=0;
      }
    }
  }
  db_finalize(&q);
  return fForkSeen;
}

/*
** COMMAND: merge
**
** Usage: %fossil merge ?OPTIONS? ?VERSION?
**
** The argument VERSION is a version that should be merged into the
................................................................................
**
**   -v|--verbose            Show additional details of the merge
*/
void merge_cmd(void){
  int vid;              /* Current version "V" */
  int mid;              /* Version we are merging from "M" */
  int pid;              /* The pivot version - most recent common ancestor P */

  int verboseFlag;      /* True if the -v|--verbose option is present */
  int integrateFlag;    /* True if the --integrate option is present */
  int pickFlag;         /* True if the --cherrypick option is present */
  int backoutFlag;      /* True if the --backout option is present */
  int dryRunFlag;       /* True if the --dry-run or -n option is present */
  int forceFlag;        /* True if the --force or -f option is present */
  int forceMissingFlag; /* True if the --force-missing option is present */
  const char *zBinGlob; /* The value of --binary */
  const char *zPivot;   /* The value of --baseline */
  int debugFlag;        /* True if --debug is present */
  int nChng;            /* Number of file name changes */
  int *aChng;           /* An array of file name changes */
  int i;                /* Loop counter */
  int nConflict = 0;    /* Number of conflicts seen */
  int nOverwrite = 0;   /* Number of unmanaged files overwritten */

  Stmt q;


  /* Notation:
  **
  **      V     The current checkout
  **      M     The version being merged in
  **      P     The "pivot" - the most recent common ancestor of V and M.

  */

  undo_capture_command_line();
  verboseFlag = find_option("verbose","v",0)!=0;
  forceMissingFlag = find_option("force-missing",0,0)!=0;
  if( !verboseFlag ){
    verboseFlag = find_option("detail",0,0)!=0; /* deprecated */
................................................................................
    pid = name_to_typed_rid(zPivot, "ci");
    if( pid==0 || !is_a_version(pid) ){
      fossil_fatal("not a version: %s", zPivot);
    }
    if( pickFlag ){
      fossil_fatal("incompatible options: --cherrypick & --baseline");
    }

  }else if( pickFlag || backoutFlag ){
    if( integrateFlag ){
      fossil_fatal("incompatible options: --integrate & --cherrypick or --backout");
    }
    pid = db_int(0, "SELECT pid FROM plink WHERE cid=%d AND isprim", mid);
    if( pid<=0 ){
      fossil_fatal("cannot find an ancestor for %s", g.argv[2]);
    }
  }else{

    pivot_set_primary(mid);
    pivot_set_secondary(vid);
    db_prepare(&q, "SELECT merge FROM vmerge WHERE id=0");
    while( db_step(&q)==SQLITE_ROW ){
      pivot_set_secondary(db_column_int(&q,0));
    }
    db_finalize(&q);
    pid = pivot_find();
    if( pid<=0 ){
      fossil_fatal("cannot find a common ancestor between the current "
                   "checkout and %s", g.argv[2]);
    }
  }
  if( backoutFlag ){
    int t = pid;
    pid = mid;
    mid = t;
  }

  if( !is_a_version(pid) ){
    fossil_fatal("not a version: record #%d", pid);
  }
  if( !forceFlag && mid==pid ){
    fossil_print("Merge skipped because it is a no-op. "
                 " Use --force to override.\n");
    return;
................................................................................
  if( !dryRunFlag ) undo_begin();
  if( load_vfile_from_rid(mid) && !forceMissingFlag ){
    fossil_fatal("missing content, unable to merge");
  }
  if( load_vfile_from_rid(pid) && !forceMissingFlag ){
    fossil_fatal("missing content, unable to merge");
  }

  if( debugFlag ){
    char *z;


    z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", pid);
    fossil_print("P=%d %z\n", pid, z);
    z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", mid);
    fossil_print("M=%d %z\n", mid, z);
    z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", vid);
    fossil_print("V=%d %z\n", vid, z);
  }
................................................................................
  ** The vfile.pathname field is used to match files against each other.  The
  ** FV table contains one row for each unique filename in
  ** the current checkout, the pivot, and the version being merged.
  */
  db_multi_exec(
    "DROP TABLE IF EXISTS fv;"
    "CREATE TEMP TABLE fv("
    "  fn TEXT PRIMARY KEY %s,"   /* The filename */
    "  idv INTEGER,"              /* VFILE entry for current version */
    "  idp INTEGER,"              /* VFILE entry for the pivot */
    "  idm INTEGER,"              /* VFILE entry for version merging in */
    "  chnged BOOLEAN,"           /* True if current version has been edited */
    "  ridv INTEGER,"             /* Record ID for current version */
    "  ridp INTEGER,"             /* Record ID for pivot */
    "  ridm INTEGER,"             /* Record ID for merge */
    "  isexe BOOLEAN,"            /* Execute permission enabled */
    "  fnp TEXT %s,"              /* The filename in the pivot */
    "  fnm TEXT %s,"              /* the filename in the merged version */

    "  islinkv BOOLEAN,"          /* True if current version is a symlink */
    "  islinkm BOOLEAN"           /* True if merged version in is a symlink */
    ");",
    filename_collation(), filename_collation(), filename_collation()

  );

  /* Add files found in V
  */
  db_multi_exec(
    "INSERT OR IGNORE"
    " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)"
    " SELECT pathname, pathname, pathname, id, 0, 0, rid, 0, 0, isexe, chnged "
    " FROM vfile WHERE vid=%d",
    vid
  );

  /*
  ** Compute name changes from P->V

  */
  find_filename_changes(pid, vid, 0, &nChng, &aChng, debugFlag ? "P->V" : 0);
  if( nChng ){
    for(i=0; i<nChng; i++){
      char *z;
      z = db_text(0, "SELECT name FROM filename WHERE fnid=%d", aChng[i*2]);
      db_multi_exec(
        "UPDATE fv SET fnp=%Q, fnm=%Q"
        " WHERE fn=(SELECT name FROM filename WHERE fnid=%d)",
        z, z, aChng[i*2+1]
      );
      free(z);
    }
    fossil_free(aChng);
    db_multi_exec("UPDATE fv SET fnm=fnp WHERE fnp!=fn");
  }


  /* Add files found in P but not in V
  */
  db_multi_exec(

    "INSERT OR IGNORE"
    " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)"
    " SELECT pathname, pathname, pathname, 0, 0, 0, 0, 0, 0, isexe, 0 "
    "   FROM vfile"
    "  WHERE vid=%d AND pathname %s NOT IN (SELECT fnp FROM fv)",
    pid, filename_collation()


  );

  /*
  ** Compute name changes from P->M
  */
  find_filename_changes(pid, mid, 0, &nChng, &aChng, debugFlag ? "P->M" : 0);
  if( nChng ){
    if( nChng>4 ) db_multi_exec("CREATE INDEX fv_fnp ON fv(fnp)");
    for(i=0; i<nChng; i++){
      db_multi_exec(
        "UPDATE fv SET fnm=(SELECT name FROM filename WHERE fnid=%d)"
        " WHERE fnp=(SELECT name FROM filename WHERE fnid=%d)",
        aChng[i*2+1], aChng[i*2]

      );
    }
    fossil_free(aChng);
  }

  /* Add files found in M but not in P or V.
  */

  db_multi_exec(
    "INSERT OR IGNORE"
    " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)"
    " SELECT pathname, pathname, pathname, 0, 0, 0, 0, 0, 0, isexe, 0 "
    "   FROM vfile"
    "  WHERE vid=%d"
    "    AND pathname %s NOT IN (SELECT fnp FROM fv UNION SELECT fnm FROM fv)",
    mid, filename_collation()
  );

  /*
  ** Compute the file version ids for P and M.
  */
  db_multi_exec(
    "UPDATE fv SET"
    " idp=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnp=pathname),0),"
    " ridp=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnp=pathname),0),"
    " idm=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnm=pathname),0),"
    " ridm=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnm=pathname),0),"
    " islinkv=coalesce((SELECT islink FROM vfile"
                    " WHERE vid=%d AND fnm=pathname),0),"
    " islinkm=coalesce((SELECT islink FROM vfile"
                    " WHERE vid=%d AND fnm=pathname),0)",

    pid, pid, mid, mid, vid, mid
  );

  if( debugFlag ){
    db_prepare(&q,
       "SELECT rowid, fn, fnp, fnm, chnged, ridv, ridp, ridm, "
       "       isexe, islinkv, islinkm FROM fv"
    );
    while( db_step(&q)==SQLITE_ROW ){
       fossil_print("%3d: ridv=%-4d ridp=%-4d ridm=%-4d chnged=%d isexe=%d "
                    " islinkv=%d islinkm=%d\n",
          db_column_int(&q, 0),
          db_column_int(&q, 5),
          db_column_int(&q, 6),
................................................................................
          db_column_int(&q, 4),
          db_column_int(&q, 8),
          db_column_int(&q, 9),
          db_column_int(&q, 10));
       fossil_print("     fn  = [%s]\n", db_column_text(&q, 1));
       fossil_print("     fnp = [%s]\n", db_column_text(&q, 2));
       fossil_print("     fnm = [%s]\n", db_column_text(&q, 3));

    }
    db_finalize(&q);
  }

  /*
  ** Find files in M and V but not in P and report conflicts.
  ** The file in M will be ignored.  It will be treated as if it
  ** does not exist.
  */
  db_prepare(&q,
    "SELECT idm FROM fv WHERE idp=0 AND idv>0 AND idm>0"
................................................................................
  while( db_step(&q)==SQLITE_ROW ){
    int idm = db_column_int(&q, 0);
    char *zName = db_text(0, "SELECT pathname FROM vfile WHERE id=%d", idm);
    fossil_warning("WARNING: no common ancestor for %s", zName);
    free(zName);
    db_multi_exec("UPDATE fv SET idm=0 WHERE idm=%d", idm);
  }
  db_finalize(&q);

  /*
  ** Add to V files that are not in V or P but are in M
  */
  db_prepare(&q,
    "SELECT idm, rowid, fnm FROM fv AS x"
    " WHERE idp=0 AND idv=0 AND idm>0"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int idm = db_column_int(&q, 0);
    int rowid = db_column_int(&q, 1);
    int idv;
    const char *zName;
    char *zFullName;
    db_multi_exec(
      "INSERT INTO vfile(vid,chnged,deleted,rid,mrid,isexe,islink,pathname)"
      "  SELECT %d,%d,0,rid,mrid,isexe,islink,pathname FROM vfile WHERE id=%d",
      vid, integrateFlag?5:3, idm
    );
    idv = db_last_insert_rowid();
    db_multi_exec("UPDATE fv SET idv=%d WHERE rowid=%d", idv, rowid);
    zName = db_column_text(&q, 2);
    zFullName = mprintf("%s%s", g.zLocalRoot, zName);
    if( file_wd_isfile_or_link(zFullName) ){
      fossil_print("ADDED %s (overwrites an unmanaged file)\n", zName);
      nOverwrite++;
    }else{
      fossil_print("ADDED %s\n", zName);
    }
    fossil_free(zFullName);
    if( !dryRunFlag ){
      undo_save(zName);
      vfile_to_disk(0, idm, 0, 0);
    }
  }
  db_finalize(&q);

  /*
  ** Find files that have changed from P->M but not P->V.
  ** Copy the M content over into V.
  */
  db_prepare(&q,
................................................................................
      char *zFullPath = mprintf("%s%s", g.zLocalRoot, zName);
      file_delete(zFullPath);
      free(zFullPath);
    }
  }
  db_finalize(&q);

  /*
  ** Rename files that have taken a rename on P->M but which keep the same
  ** name on P->V.  If a file is renamed on P->V only or on both P->V and
  ** P->M then we retain the V name of the file.
  */
  db_prepare(&q,
    "SELECT idv, fnp, fnm FROM fv"
    " WHERE idv>0 AND idp>0 AND idm>0 AND fnp=fn AND fnm!=fnp"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int idv = db_column_int(&q, 0);
    const char *zOldName = db_column_text(&q, 1);
    const char *zNewName = db_column_text(&q, 2);

    fossil_print("RENAME %s -> %s\n", zOldName, zNewName);
    if( !dryRunFlag ) undo_save(zOldName);
    if( !dryRunFlag ) undo_save(zNewName);
    db_multi_exec(


      "UPDATE vfile SET pathname=%Q, origname=coalesce(origname,pathname)"
      " WHERE id=%d AND vid=%d", zNewName, idv, vid

    );
    if( !dryRunFlag ){
      char *zFullOldPath = mprintf("%s%s", g.zLocalRoot, zOldName);

      char *zFullNewPath = mprintf("%s%s", g.zLocalRoot, zNewName);
      if( file_wd_islink(zFullOldPath) ){
        symlink_copy(zFullOldPath, zFullNewPath);
      }else{
        file_copy(zFullOldPath, zFullNewPath);
      }

      file_delete(zFullOldPath);
      free(zFullNewPath);
      free(zFullOldPath);
    }
  }
  db_finalize(&q);

  /* Report on conflicts
  */
  if( nConflict ){
    fossil_warning("WARNING: %d merge conflicts", nConflict);
  }
  if( nOverwrite ){

................................................................................

        fForkSeen = fossil_find_nearest_fork(rid, db_open_local(0))!=0;
      }
    }
  }
  db_finalize(&q);
  return fForkSeen;
}

/*
** Add an entry to the FV table for all files renamed between
** version N and the version specified by vid.
*/
static void add_renames(
  const char *zFnCol, /* The FV column for the filename in vid */
  int vid,            /* The desired version's RID */
  int nid,            /* Version N's RID */
  int revOk,          /* Ok to move backwards (child->parent) if true */
  const char *zDebug  /* Generate trace output if not NULL */
){
  int nChng;  /* Number of file name changes */
  int *aChng; /* An array of file name changes */
  int i;      /* Loop counter */
  find_filename_changes(nid, vid, revOk, &nChng, &aChng, zDebug);
  if( nChng==0 ) return;
  for(i=0; i<nChng; i++){
    char *zN, *zV;
    zN = db_text(0, "SELECT name FROM filename WHERE fnid=%d", aChng[i*2]);
    zV = db_text(0, "SELECT name FROM filename WHERE fnid=%d", aChng[i*2+1]);
    db_multi_exec(
      "INSERT OR IGNORE INTO fv(%s,fnn) VALUES(%Q,%Q)",
      zFnCol /*safe-for-%s*/, zV, zN
    );
    if( db_changes()==0 ){
      db_multi_exec(
        "UPDATE fv SET %s=%Q WHERE fnn=%Q",
        zFnCol /*safe-for-%s*/, zV, zN
      );
    }
    free(zN);
    free(zV);
  }
  free(aChng);
}
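
/* Reviewer note (not in the check-in), a worked example of the function
** above: if the name pivot N contains "a.txt" and the current checkout V
** renames it to "b.txt", add_renames("fn", vid, nid, ...) inserts an FV
** row with fnn='a.txt', fn='b.txt'.  The later "fnp" and "fnm" passes
** find that row already present (db_changes()==0) and fill their columns
** through the UPDATE branch instead of inserting a duplicate row.
*/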

/*
** COMMAND: merge
**
** Usage: %fossil merge ?OPTIONS? ?VERSION?
**
** The argument VERSION is a version that should be merged into the
................................................................................
**
**   -v|--verbose            Show additional details of the merge
*/
void merge_cmd(void){
  int vid;              /* Current version "V" */
  int mid;              /* Version we are merging from "M" */
  int pid;              /* The pivot version - most recent common ancestor P */
  int nid = 0;          /* The name pivot version "N" */
  int verboseFlag;      /* True if the -v|--verbose option is present */
  int integrateFlag;    /* True if the --integrate option is present */
  int pickFlag;         /* True if the --cherrypick option is present */
  int backoutFlag;      /* True if the --backout option is present */
  int dryRunFlag;       /* True if the --dry-run or -n option is present */
  int forceFlag;        /* True if the --force or -f option is present */
  int forceMissingFlag; /* True if the --force-missing option is present */
  const char *zBinGlob; /* The value of --binary */
  const char *zPivot;   /* The value of --baseline */
  int debugFlag;        /* True if --debug is present */
  int nConflict = 0;    /* Number of conflicts seen */
  int nOverwrite = 0;   /* Number of unmanaged files overwritten */
  char vAncestor = 'p'; /* If P is an ancestor of V then 'p', else 'n' */
  Stmt q;


  /* Notation:
  **
  **      V     The current checkout
  **      M     The version being merged in
  **      P     The "pivot" - the most recent common ancestor of V and M.
  **      N     The "name pivot" - for detecting renames
  */

  undo_capture_command_line();
  verboseFlag = find_option("verbose","v",0)!=0;
  forceMissingFlag = find_option("force-missing",0,0)!=0;
  if( !verboseFlag ){
    verboseFlag = find_option("detail",0,0)!=0; /* deprecated */
................................................................................
    pid = name_to_typed_rid(zPivot, "ci");
    if( pid==0 || !is_a_version(pid) ){
      fossil_fatal("not a version: %s", zPivot);
    }
    if( pickFlag ){
      fossil_fatal("incompatible options: --cherrypick & --baseline");
    }
  }
  if( pickFlag || backoutFlag ){
    if( integrateFlag ){
      fossil_fatal("incompatible options: --integrate & --cherrypick or --backout");
    }
    pid = db_int(0, "SELECT pid FROM plink WHERE cid=%d AND isprim", mid);
    if( pid<=0 ){
      fossil_fatal("cannot find an ancestor for %s", g.argv[2]);
    }
  }else{
    if( !zPivot ){
      pivot_set_primary(mid);
      pivot_set_secondary(vid);
      db_prepare(&q, "SELECT merge FROM vmerge WHERE id=0");
      while( db_step(&q)==SQLITE_ROW ){
        pivot_set_secondary(db_column_int(&q,0));
      }
      db_finalize(&q);
      pid = pivot_find(0);
      if( pid<=0 ){
        fossil_fatal("cannot find a common ancestor between the current "
                     "checkout and %s", g.argv[2]);
      }
    }
    pivot_set_primary(mid);
    pivot_set_secondary(vid);
    nid = pivot_find(1);
    if( nid!=pid ){
      pivot_set_primary(nid);
      pivot_set_secondary(pid);
      nid = pivot_find(1);
    }
  }
  if( backoutFlag ){
    int t = pid;
    pid = mid;
    mid = t;
  }
  if( nid==0 ) nid = pid;
  if( !is_a_version(pid) ){
    fossil_fatal("not a version: record #%d", pid);
  }
  if( !forceFlag && mid==pid ){
    fossil_print("Merge skipped because it is a no-op. "
                 " Use --force to override.\n");
    return;
................................................................................
  if( !dryRunFlag ) undo_begin();
  if( load_vfile_from_rid(mid) && !forceMissingFlag ){
    fossil_fatal("missing content, unable to merge");
  }
  if( load_vfile_from_rid(pid) && !forceMissingFlag ){
    fossil_fatal("missing content, unable to merge");
  }
  if( zPivot ){
    vAncestor = db_exists(
      "WITH RECURSIVE ancestor(id) AS ("
      "  VALUES(%d)"
      "  UNION ALL"
      "  SELECT pid FROM plink, ancestor"
      "   WHERE cid=ancestor.id AND pid!=%d AND cid!=%d)"
      "SELECT 1 FROM ancestor WHERE id=%d LIMIT 1",
      vid, nid, pid, pid
    ) ? 'p' : 'n';
  }
  if( debugFlag ){
    char *z;
    z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", nid);
    fossil_print("N=%d %z\n", nid, z);
    z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", pid);
    fossil_print("P=%d %z\n", pid, z);
    z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", mid);
    fossil_print("M=%d %z\n", mid, z);
    z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", vid);
    fossil_print("V=%d %z\n", vid, z);
  }
................................................................................
  ** The vfile.pathname field is used to match files against each other.  The
  ** FV table contains one row for each unique filename in
  ** the current checkout, the pivot, and the version being merged.
  */
  db_multi_exec(
    "DROP TABLE IF EXISTS fv;"
    "CREATE TEMP TABLE fv("
    "  fn TEXT UNIQUE %s,"        /* The filename */
    "  idv INTEGER DEFAULT 0,"    /* VFILE entry for current version */
    "  idp INTEGER DEFAULT 0,"    /* VFILE entry for the pivot */
    "  idm INTEGER DEFAULT 0,"    /* VFILE entry for version merging in */
    "  chnged BOOLEAN,"           /* True if current version has been edited */
    "  ridv INTEGER DEFAULT 0,"   /* Record ID for current version */
    "  ridp INTEGER DEFAULT 0,"   /* Record ID for pivot */
    "  ridm INTEGER DEFAULT 0,"   /* Record ID for merge */
    "  isexe BOOLEAN,"            /* Execute permission enabled */
    "  fnp TEXT UNIQUE %s,"       /* The filename in the pivot */
    "  fnm TEXT UNIQUE %s,"       /* The filename in the merged version */
    "  fnn TEXT UNIQUE %s,"       /* The filename in the name pivot */
    "  islinkv BOOLEAN,"          /* True if current version is a symlink */
    "  islinkm BOOLEAN"           /* True if merged version in is a symlink */
    ");",
    filename_collation(), filename_collation(), filename_collation(),
    filename_collation()
  );

  /*
  ** Compute name changes from N to V, P, and M
  */
  add_renames("fn", vid, nid, 0, debugFlag ? "N->V" : 0);
  add_renames("fnp", pid, nid, 0, debugFlag ? "N->P" : 0);
  add_renames("fnm", mid, nid, backoutFlag, debugFlag ? "N->M" : 0);

  /*
  ** Add files found in V
  */
  db_multi_exec(
    "UPDATE OR IGNORE fv SET fn=coalesce(fn%c,fnn) WHERE fn IS NULL;"
    "REPLACE INTO fv(fn,fnp,fnm,fnn,idv,ridv,islinkv,isexe,chnged)"
    " SELECT pathname, fnp, fnm, fnn, id, rid, islink, vf.isexe, vf.chnged"
    "   FROM vfile vf"
    "   LEFT JOIN fv ON fn=coalesce(origname,pathname)"
    "    AND rid>0 AND vf.chnged NOT IN (3,5)"
    "  WHERE vid=%d;",
    vAncestor, vid
  );

  /*

  ** Add files found in P
  */
  db_multi_exec(
    "UPDATE OR IGNORE fv SET fnp=coalesce(fnn,"
    "   (SELECT coalesce(origname,pathname) FROM vfile WHERE id=idv))"
    " WHERE fnp IS NULL;"
    "INSERT OR IGNORE INTO fv(fnp)"
    " SELECT coalesce(origname,pathname) FROM vfile WHERE vid=%d;",
    pid
  );

  /*
  ** Add files found in M
  */
  db_multi_exec(
    "UPDATE OR IGNORE fv SET fnm=fnp WHERE fnm IS NULL;"
    "INSERT OR IGNORE INTO fv(fnm)"
    " SELECT pathname FROM vfile WHERE vid=%d;",
    mid
  );

  /*
  ** Compute the file version ids for P and M
  */
  if( pid==vid ){
    db_multi_exec(
      "UPDATE fv SET idp=idv, ridp=ridv WHERE ridv>0 AND chnged NOT IN (3,5)"
    );
  }else{
    db_multi_exec(
      "UPDATE fv SET"
      " idp=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnp=pathname),0),"
      " ridp=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnp=pathname),0)",
      pid, pid
    );
  }

  db_multi_exec(
    "UPDATE fv SET"


    " idm=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnm=pathname),0),"
    " ridm=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnm=pathname),0),"
    " islinkm=coalesce((SELECT islink FROM vfile"
                    " WHERE vid=%d AND fnm=pathname),0),"

    " isexe=coalesce((SELECT isexe FROM vfile WHERE vid=%d AND fnm=pathname),"
    "   isexe)",
    mid, mid, mid, mid
  );

  if( debugFlag ){
    db_prepare(&q,
       "SELECT rowid, fn, fnp, fnm, chnged, ridv, ridp, ridm, "
       "       isexe, islinkv, islinkm, fnn FROM fv"
    );
    while( db_step(&q)==SQLITE_ROW ){
       fossil_print("%3d: ridv=%-4d ridp=%-4d ridm=%-4d chnged=%d isexe=%d "
                    " islinkv=%d islinkm=%d\n",
          db_column_int(&q, 0),
          db_column_int(&q, 5),
          db_column_int(&q, 6),
................................................................................
          db_column_int(&q, 4),
          db_column_int(&q, 8),
          db_column_int(&q, 9),
          db_column_int(&q, 10));
       fossil_print("     fn  = [%s]\n", db_column_text(&q, 1));
       fossil_print("     fnp = [%s]\n", db_column_text(&q, 2));
       fossil_print("     fnm = [%s]\n", db_column_text(&q, 3));
       fossil_print("     fnn = [%s]\n", db_column_text(&q, 11));
    }
    db_finalize(&q);
  }

  /*
  ** Update the execute bit on files where it's changed from P->M but not P->V
  */
  db_prepare(&q,
    "SELECT idv, fn, fv.isexe FROM fv, vfile p, vfile v"
    " WHERE p.id=idp AND v.id=idv AND fv.isexe!=p.isexe AND v.isexe=p.isexe"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int idv = db_column_int(&q, 0);
    const char *zName = db_column_text(&q, 1);
    int isExe = db_column_int(&q, 2);
    fossil_print("%s %s\n", isExe ? "EXECUTABLE" : "UNEXEC", zName);
    if( !dryRunFlag ){
      char *zFullPath = mprintf("%s/%s", g.zLocalRoot, zName);
      file_wd_setexe(zFullPath, isExe);
      free(zFullPath);
      db_multi_exec("UPDATE vfile SET isexe=%d WHERE id=%d", isExe, idv);
    }
  }
  db_finalize(&q);

  /*
  ** Find files in M and V but not in P and report conflicts.
  ** The file in M will be ignored.  It will be treated as if it
  ** does not exist.
  */
  db_prepare(&q,
    "SELECT idm FROM fv WHERE idp=0 AND idv>0 AND idm>0"
................................................................................
  while( db_step(&q)==SQLITE_ROW ){
    int idm = db_column_int(&q, 0);
    char *zName = db_text(0, "SELECT pathname FROM vfile WHERE id=%d", idm);
    fossil_warning("WARNING: no common ancestor for %s", zName);
    free(zName);
    db_multi_exec("UPDATE fv SET idm=0 WHERE idm=%d", idm);
  }
  db_finalize(&q);

  /*
  ** Find files that have changed from P->M but not P->V.
  ** Copy the M content over into V.
  */
  db_prepare(&q,
................................................................................
      char *zFullPath = mprintf("%s%s", g.zLocalRoot, zName);
      file_delete(zFullPath);
      free(zFullPath);
    }
  }
  db_finalize(&q);

  /* For certain sets of renames (e.g. A -> B and B -> A), a file that is
  ** being renamed must first be moved to a temporary location to avoid
  ** being overwritten by another rename operation. A row is added to the
  ** TMPRN table for each of these temporary renames.
  */
  db_multi_exec(
    "DROP TABLE IF EXISTS tmprn;"
    "CREATE TEMP TABLE tmprn(fn UNIQUE, tmpfn);"
  );

  /*
  ** Rename files that have taken a rename on P->M but which keep the same
  ** name on P->V.  If a file is renamed on P->V only or on both P->V and
  ** P->M then we retain the V name of the file.
  */
  db_prepare(&q,
    "SELECT idv, fnp, fnm, isexe FROM fv"
    " WHERE idv>0 AND idp>0 AND idm>0 AND fnp=fn AND fnm!=fnp"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int idv = db_column_int(&q, 0);
    const char *zOldName = db_column_text(&q, 1);
    const char *zNewName = db_column_text(&q, 2);
    int isExe = db_column_int(&q, 3);
    fossil_print("RENAME %s -> %s\n", zOldName, zNewName);
    if( !dryRunFlag ) undo_save(zOldName);
    if( !dryRunFlag ) undo_save(zNewName);
    db_multi_exec(
      "UPDATE vfile SET pathname=NULL, origname=pathname"
      " WHERE vid=%d AND pathname=%Q;"
      "UPDATE vfile SET pathname=%Q, origname=coalesce(origname,pathname)"
      " WHERE id=%d;",
      vid, zNewName, zNewName, idv
    );
    if( !dryRunFlag ){
      char *zFullOldPath, *zFullNewPath;
      zFullOldPath = db_text(0,"SELECT tmpfn FROM tmprn WHERE fn=%Q", zOldName);
      if( !zFullOldPath ){
        zFullOldPath = mprintf("%s%s", g.zLocalRoot, zOldName);
      }
      zFullNewPath = mprintf("%s%s", g.zLocalRoot, zNewName);
      if( file_wd_size(zFullNewPath)>=0 ){
        char zTmpPath[300];
        file_tempname(sizeof(zTmpPath), zTmpPath);
        db_multi_exec("INSERT INTO tmprn(fn,tmpfn) VALUES(%Q,%Q)",
                      zNewName, zTmpPath);
        if( file_wd_islink(zFullNewPath) ){
          symlink_copy(zFullNewPath, zTmpPath);
        }else{
          file_copy(zFullNewPath, zTmpPath);
        }
      }
      if( file_wd_islink(zFullOldPath) ){
        symlink_copy(zFullOldPath, zFullNewPath);
      }else{
        file_copy(zFullOldPath, zFullNewPath);
      }
      file_wd_setexe(zFullNewPath, isExe);
      file_delete(zFullOldPath);
      free(zFullNewPath);
      free(zFullOldPath);
    }
  }
  db_finalize(&q);

  /* A file that has been deleted and replaced by a renamed file will have a
  ** NULL pathname. Change it to something that makes the output of "status"
  ** and similar commands make sense for such files and that will (most likely)
  ** not be an actual existing pathname.
  */
  db_multi_exec(
    "UPDATE vfile SET pathname=origname || ' (overwritten by rename)'"
    " WHERE pathname IS NULL"
  );

  /*
  ** Add to V files that are not in V or P but are in M
  */
  db_prepare(&q,
    "SELECT idm, fnm FROM fv"
    " WHERE idp=0 AND idv=0 AND idm>0"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int idm = db_column_int(&q, 0);
    const char *zName;
    char *zFullName;
    db_multi_exec(
      "INSERT INTO vfile(vid,chnged,deleted,rid,mrid,isexe,islink,pathname)"
      "  SELECT %d,%d,0,rid,mrid,isexe,islink,pathname FROM vfile WHERE id=%d",
      vid, integrateFlag?5:3, idm
    );
    zName = db_column_text(&q, 1);
    zFullName = mprintf("%s%s", g.zLocalRoot, zName);
    if( file_wd_isfile_or_link(zFullName)
        && !db_exists("SELECT 1 FROM fv WHERE fn=%Q", zName) ){
      fossil_print("ADDED %s (overwrites an unmanaged file)\n", zName);
      nOverwrite++;
    }else{
      fossil_print("ADDED %s\n", zName);
    }
    fossil_free(zFullName);
    if( !dryRunFlag ){
      undo_save(zName);
      vfile_to_disk(0, idm, 0, 0);
    }
  }
  db_finalize(&q);

  /* Report on conflicts
  */
  if( nConflict ){
    fossil_warning("WARNING: %d merge conflicts", nConflict);
  }
  if( nOverwrite ){
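
The TMPRN table introduced above guards against rename cycles.  A minimal
scenario sketch (file names hypothetical): suppose P->M renames A -> B and
also B -> A.  Copying A over B first would destroy B's working-copy
content before it can reach A, so B is first copied to a scratch file,

  file_tempname(sizeof(zTmpPath), zTmpPath);
  file_copy(zFullNewPath, zTmpPath);
  db_multi_exec("INSERT INTO tmprn(fn,tmpfn) VALUES(%Q,%Q)",
                zNewName, zTmpPath);

and the later B -> A step then reads from the scratch copy recorded in
tmprn instead of the already-overwritten path.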

Changes to src/path.c.

  }
  db_finalize(&q1);
  if( nChng ){
    aChng = *aiChng = fossil_malloc( nChng*2*sizeof(int) );
    for(pChng=pAll, i=0; pChng; pChng=pChng->pNext){
      if( pChng->newName==0 ) continue;
      if( pChng->origName==0 ) continue;
      if( pChng->newName==pChng->origName ) continue;
      aChng[i] = pChng->origName;
      aChng[i+1] = pChng->newName;
      if( zDebug ){
        fossil_print("%s summary %d[%z] -> %d[%z]\n",
           zDebug,
           aChng[i],
           db_text(0, "SELECT name FROM filename WHERE fnid=%d", aChng[i]),

................................................................................

  }
  db_finalize(&q1);
  if( nChng ){
    aChng = *aiChng = fossil_malloc( nChng*2*sizeof(int) );
    for(pChng=pAll, i=0; pChng; pChng=pChng->pNext){
      if( pChng->newName==0 ) continue;
      if( pChng->origName==0 ) continue;

      aChng[i] = pChng->origName;
      aChng[i+1] = pChng->newName;
      if( zDebug ){
        fossil_print("%s summary %d[%z] -> %d[%z]\n",
           zDebug,
           aChng[i],
           db_text(0, "SELECT name FROM filename WHERE fnid=%d", aChng[i]),

Changes to src/pivot.c.

  );
}

/*
** Find the most recent common ancestor of the primary and one of
** the secondaries.  Return its rid.  Return 0 if no common ancestor
** can be found.


*/
int pivot_find(void){
  Stmt q1, q2, u1, i1;
  int rid = 0;

  /* aqueue must contain at least one primary and one other.  Otherwise
  ** we abort early
  */
  if( db_int(0, "SELECT count(distinct src) FROM aqueue")<2 ){
................................................................................
  ** is not.
  */
  db_prepare(&q2,
    "SELECT 1 FROM aqueue A, plink, aqueue B"
    " WHERE plink.pid=:rid"
    "   AND plink.cid=B.rid"
    "   AND A.rid=:rid"
    "   AND A.src!=B.src"

  );

  /* Mark the :rid record as having been checked.  It is not the
  ** common ancestor.
  */
  db_prepare(&u1,
    "UPDATE aqueue SET pending=0 WHERE rid=:rid"
................................................................................
    "INSERT OR IGNORE INTO aqueue "
    "SELECT plink.pid,"
    "       coalesce((SELECT mtime FROM plink X WHERE X.cid=plink.pid), 0.0),"
    "       1,"
    "       aqueue.src "
    "  FROM plink, aqueue"
    " WHERE plink.cid=:rid"
    "   AND aqueue.rid=:rid"

  );

  while( db_step(&q1)==SQLITE_ROW ){
    rid = db_column_int(&q1, 0);
    db_reset(&q1);
    db_bind_int(&q2, ":rid", rid);
    if( db_step(&q2)==SQLITE_ROW ){
................................................................................
    usage("PRIMARY SECONDARY ...");
  }
  db_must_be_within_tree();
  pivot_set_primary(name_to_rid(g.argv[2]));
  for(i=3; i<g.argc; i++){
    pivot_set_secondary(name_to_rid(g.argv[i]));
  }
  rid = pivot_find();
  printf("pivot=%s\n",
         db_text("?","SELECT uuid FROM blob WHERE rid=%d",rid)
  );
}

................................................................................

  );
}

/*
** Find the most recent common ancestor of the primary and one of
** the secondaries.  Return its rid.  Return 0 if no common ancestor
** can be found.
**
** If ignoreMerges is true, follow only "primary" parent links.
*/
int pivot_find(int ignoreMerges){
  Stmt q1, q2, u1, i1;
  int rid = 0;

  /* aqueue must contain at least one primary and one other.  Otherwise
  ** we abort early
  */
  if( db_int(0, "SELECT count(distinct src) FROM aqueue")<2 ){
................................................................................
  ** is not.
  */
  db_prepare(&q2,
    "SELECT 1 FROM aqueue A, plink, aqueue B"
    " WHERE plink.pid=:rid"
    "   AND plink.cid=B.rid"
    "   AND A.rid=:rid"
    "   AND A.src!=B.src %s",
    ignoreMerges ? "AND plink.isprim" : ""
  );

  /* Mark the :rid record as having been checked.  It is not the
  ** common ancestor.
  */
  db_prepare(&u1,
    "UPDATE aqueue SET pending=0 WHERE rid=:rid"
................................................................................
    "INSERT OR IGNORE INTO aqueue "
    "SELECT plink.pid,"
    "       coalesce((SELECT mtime FROM plink X WHERE X.cid=plink.pid), 0.0),"
    "       1,"
    "       aqueue.src "
    "  FROM plink, aqueue"
    " WHERE plink.cid=:rid"
    "   AND aqueue.rid=:rid %s",
    ignoreMerges ? "AND plink.isprim" : ""
  );

  while( db_step(&q1)==SQLITE_ROW ){
    rid = db_column_int(&q1, 0);
    db_reset(&q1);
    db_bind_int(&q2, ":rid", rid);
    if( db_step(&q2)==SQLITE_ROW ){
................................................................................
    usage("PRIMARY SECONDARY ...");
  }
  db_must_be_within_tree();
  pivot_set_primary(name_to_rid(g.argv[2]));
  for(i=3; i<g.argc; i++){
    pivot_set_secondary(name_to_rid(g.argv[i]));
  }
  rid = pivot_find(0);
  printf("pivot=%s\n",
         db_text("?","SELECT uuid FROM blob WHERE rid=%d",rid)
  );
}
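
With the new ignoreMerges argument, pivot_find() can compute a rename
pivot that follows only primary (non-merge) parent links, which is exactly
how merge_cmd in src/merge.c above uses it:

  pivot_set_primary(mid);
  pivot_set_secondary(vid);
  nid = pivot_find(1);   /* ignore merge parents */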

Changes to src/sqlite3.c.

**
** See also: [sqlite3_libversion()],
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
#define SQLITE_VERSION        "3.13.0"
#define SQLITE_VERSION_NUMBER 3013000
#define SQLITE_SOURCE_ID      "2016-05-09 19:03:42 14e53d0e2f62d82ae1d64a72fd9711548e3bf5ea"
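
/* Illustration (reviewer sketch, not part of the check-in): the macros
** above permit a compile-time version gate, e.g.
**
**   #if SQLITE_VERSION_NUMBER < 3013000
**   # error "this tree expects SQLite 3.13.0 or later"
**   #endif
*/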

/*
** CAPI3REF: Run-Time Library Version Numbers
** KEYWORDS: sqlite3_version, sqlite3_sourceid
**
** These interfaces provide the same information as the [SQLITE_VERSION],
** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros
................................................................................
**
** In other words, ALWAYS and NEVER are added for defensive code.
**
** When doing coverage testing ALWAYS and NEVER are hard-coded to
** be true and false so that the unreachable code they specify will
** not be counted as untested code.
*/
#if defined(SQLITE_COVERAGE_TEST)
# define ALWAYS(X)      (1)
# define NEVER(X)       (0)
#elif !defined(NDEBUG)
# define ALWAYS(X)      ((X)?1:(assert(0),0))
# define NEVER(X)       ((X)?(assert(0),1):0)
#else
# define ALWAYS(X)      (X)
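
/* Illustration (reviewer sketch): ALWAYS(X) evaluates X normally in
** release builds, asserts when X is false in debug builds, and collapses
** to a constant under SQLITE_COVERAGE_TEST so the unreachable branch is
** not counted by coverage, e.g.
**
**   if( ALWAYS(pPager!=0) ){ ... expected path ... }
*/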
................................................................................
** Every page in the cache is controlled by an instance of the following
** structure.
*/
struct PgHdr {
  sqlite3_pcache_page *pPage;    /* Pcache object page handle */
  void *pData;                   /* Page data */
  void *pExtra;                  /* Extra content */
  PgHdr *pDirty;                 /* Transient list of dirty pages */
  Pager *pPager;                 /* The pager this page is part of */
  Pgno pgno;                     /* Page number for this page */
#ifdef SQLITE_CHECK_PAGES
  u32 pageHash;                  /* Hash of page content */
#endif
  u16 flags;                     /* PGHDR flags defined below */

................................................................................

/* Bit values for PgHdr.flags */
#define PGHDR_CLEAN           0x001  /* Page not on the PCache.pDirty list */
#define PGHDR_DIRTY           0x002  /* Page is on the PCache.pDirty list */
#define PGHDR_WRITEABLE       0x004  /* Journaled and ready to modify */
#define PGHDR_NEED_SYNC       0x008  /* Fsync the rollback journal before
                                     ** writing this page to the database */
#define PGHDR_NEED_READ       0x010  /* Content is unread */
#define PGHDR_DONT_WRITE      0x020  /* Do not write content to disk */
#define PGHDR_MMAP            0x040  /* This is an mmap page object */

#define PGHDR_WAL_APPEND      0x080  /* Appended to wal file */

/* Initialize and shutdown the page cache subsystem */
SQLITE_PRIVATE int sqlite3PcacheInitialize(void);
SQLITE_PRIVATE void sqlite3PcacheShutdown(void);

/* Page cache buffer management:
** These routines implement SQLITE_CONFIG_PAGECACHE.
................................................................................
#if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG)
/* Iterate through all dirty pages currently stored in the cache. This
** interface is only available if SQLITE_CHECK_PAGES is defined when the 
** library is built.
*/
SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *));
#endif

/* Set and get the suggested cache-size for the specified pager-cache.
**
** If no global maximum is configured, then the system attempts to limit
** the total number of pages cached by purgeable pager-caches to the sum
** of the suggested cache-sizes.
*/
................................................................................
**
*************************************************************************
** This file implements that page cache.
*/
/* #include "sqliteInt.h" */

/*
** A complete page cache is an instance of this structure.
*/
struct PCache {
  PgHdr *pDirty, *pDirtyTail;         /* List of dirty pages in LRU order */
  PgHdr *pSynced;                     /* Last synced page in dirty page list */
  int nRefSum;                        /* Sum of ref counts over all pages */
  int szCache;                        /* Configured cache size */
  int szSpill;                        /* Size before spilling occurs */
................................................................................
  int szExtra;                        /* Size of extra space for each page */
  u8 bPurgeable;                      /* True if pages are on backing store */
  u8 eCreate;                         /* eCreate value for for xFetch() */
  int (*xStress)(void*,PgHdr*);       /* Call to try make a page clean */
  void *pStress;                      /* Argument to xStress */
  sqlite3_pcache *pCache;             /* Pluggable cache module */
};

/********************************** Linked List Management ********************/

/* Allowed values for second argument to pcacheManageDirtyList() */
#define PCACHE_DIRTYLIST_REMOVE   1    /* Remove pPage from dirty list */
#define PCACHE_DIRTYLIST_ADD      2    /* Add pPage to the dirty list */
#define PCACHE_DIRTYLIST_FRONT    3    /* Move pPage to the front of the list */
................................................................................
** argument determines what operation to do.  The 0x01 bit means first
** remove pPage from the dirty list.  The 0x02 means add pPage back to
** the dirty list.  Doing both moves pPage to the front of the dirty list.
*/
static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){
  PCache *p = pPage->pCache;

  if( addRemove & PCACHE_DIRTYLIST_REMOVE ){
    assert( pPage->pDirtyNext || pPage==p->pDirtyTail );
    assert( pPage->pDirtyPrev || pPage==p->pDirty );
  
    /* Update the PCache1.pSynced variable if necessary. */
    if( p->pSynced==pPage ){
      PgHdr *pSynced = pPage->pDirtyPrev;
      while( pSynced && (pSynced->flags&PGHDR_NEED_SYNC) ){
        pSynced = pSynced->pDirtyPrev;
      }
      p->pSynced = pSynced;
    }
  
    if( pPage->pDirtyNext ){
      pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev;
    }else{
      assert( pPage==p->pDirtyTail );
      p->pDirtyTail = pPage->pDirtyPrev;
    }
    if( pPage->pDirtyPrev ){
      pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext;
    }else{
      assert( pPage==p->pDirty );
      p->pDirty = pPage->pDirtyNext;
      if( p->pDirty==0 && p->bPurgeable ){
        assert( p->eCreate==1 );
        p->eCreate = 2;
      }
    }
    pPage->pDirtyNext = 0;
    pPage->pDirtyPrev = 0;
  }
  if( addRemove & PCACHE_DIRTYLIST_ADD ){
................................................................................
      p->pDirtyTail = pPage;
      if( p->bPurgeable ){
        assert( p->eCreate==2 );
        p->eCreate = 1;
      }
    }
    p->pDirty = pPage;
    if( !p->pSynced && 0==(pPage->flags&PGHDR_NEED_SYNC) ){
      p->pSynced = pPage;
    }
  }
}

/*
** Wrapper around the pluggable caches xUnpin method. If the cache is
** being used for an in-memory database, this function is a no-op.
*/
static void pcacheUnpin(PgHdr *p){
  if( p->pCache->bPurgeable ){
    sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 0);
  }
}

/*
** Compute the number of pages of cache requested.   p->szCache is the
** cache size requested by the "PRAGMA cache_size" statement.
*/
................................................................................
  p->szExtra = szExtra;
  p->bPurgeable = bPurgeable;
  p->eCreate = 2;
  p->xStress = xStress;
  p->pStress = pStress;
  p->szCache = 100;
  p->szSpill = 1;

  return sqlite3PcacheSetPageSize(p, szPage);
}

/*
** Change the page size for PCache object. The caller must ensure that there
** are no outstanding page references when this function is called.
*/
................................................................................
    if( pNew==0 ) return SQLITE_NOMEM_BKPT;
    sqlite3GlobalConfig.pcache2.xCachesize(pNew, numberOfCachePages(pCache));
    if( pCache->pCache ){
      sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
    }
    pCache->pCache = pNew;
    pCache->szPage = szPage;

  }
  return SQLITE_OK;
}

/*
** Try to obtain a page from the cache.
**
................................................................................
*/
SQLITE_PRIVATE sqlite3_pcache_page *sqlite3PcacheFetch(
  PCache *pCache,       /* Obtain the page from this cache */
  Pgno pgno,            /* Page number to obtain */
  int createFlag        /* If true, create page if it does not exist already */
){
  int eCreate;

  assert( pCache!=0 );
  assert( pCache->pCache!=0 );
  assert( createFlag==3 || createFlag==0 );
  assert( pgno>0 );

  /* eCreate defines what to do if the page does not exist.
  **    0     Do not allocate a new page.  (createFlag==0)
  **    1     Allocate a new page if doing so is inexpensive.
  **          (createFlag==1 AND bPurgeable AND pDirty)
  **    2     Allocate a new page even if doing so is difficult.
  **          (createFlag==1 AND !(bPurgeable AND pDirty))
  */
  eCreate = createFlag & pCache->eCreate;
  assert( eCreate==0 || eCreate==1 || eCreate==2 );
  assert( createFlag==0 || pCache->eCreate==eCreate );
  assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) );
  return sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate);
}

/*
** If the sqlite3PcacheFetch() routine is unable to allocate a new
** page because no clean pages are available for reuse and the cache
** size limit has been reached, then this routine can be invoked to 
** try harder to allocate a page.  This routine might invoke the stress
................................................................................
  if( pCache->eCreate==2 ) return 0;

  if( sqlite3PcachePagecount(pCache)>pCache->szSpill ){
    /* Find a dirty page to write-out and recycle. First try to find a 
    ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC
    ** cleared), but if that is not possible settle for any other 
    ** unreferenced dirty page.
    */
    for(pPg=pCache->pSynced; 
        pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC)); 
        pPg=pPg->pDirtyPrev
    );
    pCache->pSynced = pPg;
    if( !pPg ){
      for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pDirtyPrev);
................................................................................
#ifdef SQLITE_LOG_CACHE_SPILL
      sqlite3_log(SQLITE_FULL, 
                  "spill page %d making room for %d - cache used: %d/%d",
                  pPg->pgno, pgno,
                  sqlite3GlobalConfig.pcache.xPagecount(pCache->pCache),
                numberOfCachePages(pCache));
#endif
      rc = pCache->xStress(pCache->pStress, pPg);
      if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){
        return rc;
      }
    }
  }
  *ppPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, 2);
  return *ppPage==0 ? SQLITE_NOMEM_BKPT : SQLITE_OK;
................................................................................
  pPgHdr = (PgHdr *)pPage->pExtra;

  if( !pPgHdr->pPage ){
    return pcacheFetchFinishWithInit(pCache, pgno, pPage);
  }
  pCache->nRefSum++;
  pPgHdr->nRef++;

  return pPgHdr;
}

/*
** Decrement the reference count on a page. If the page is clean and the
** reference count drops to 0, then it is made eligible for recycling.
*/
SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p){
  assert( p->nRef>0 );
  p->pCache->nRefSum--;
  if( (--p->nRef)==0 ){
    if( p->flags&PGHDR_CLEAN ){
      pcacheUnpin(p);
    }else if( p->pDirtyPrev!=0 ){
      /* Move the page to the head of the dirty list. */
      pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
    }
  }
}

/*
** Increase the reference count of a supplied page by 1.
*/
SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr *p){
  assert(p->nRef>0);

  p->nRef++;
  p->pCache->nRefSum++;
}

/*
** Drop a page from the cache. There must be exactly one reference to the
** page. This function deletes that reference, so after it returns the
** page pointed to by p is invalid.
*/
SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr *p){
  assert( p->nRef==1 );

  if( p->flags&PGHDR_DIRTY ){
    pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
  }
  p->pCache->nRefSum--;
  sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 1);
}

/*
** Make sure the page is marked as dirty. If it isn't dirty already,
** make it so.
*/
SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr *p){
  assert( p->nRef>0 );

  if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){
    p->flags &= ~PGHDR_DONT_WRITE;
    if( p->flags & PGHDR_CLEAN ){
      p->flags ^= (PGHDR_DIRTY|PGHDR_CLEAN);

      assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY );
      pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD);
    }

  }
}

/*
** Make sure the page is marked as clean. If it isn't clean already,
** make it so.
*/
SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr *p){

  if( (p->flags & PGHDR_DIRTY) ){
    assert( (p->flags & PGHDR_CLEAN)==0 );
    pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
    p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC|PGHDR_WRITEABLE);
    p->flags |= PGHDR_CLEAN;
    if( p->nRef==0 ){
      pcacheUnpin(p);
    }
  }
}

/*
** Make every page in the cache clean.
*/
SQLITE_PRIVATE void sqlite3PcacheCleanAll(PCache *pCache){
  PgHdr *p;

  while( (p = pCache->pDirty)!=0 ){
    sqlite3PcacheMakeClean(p);
  }
}

/*
** Clear the PGHDR_NEED_SYNC and PGHDR_WRITEABLE flag from all dirty pages.
*/
SQLITE_PRIVATE void sqlite3PcacheClearWritable(PCache *pCache){
  PgHdr *p;

  for(p=pCache->pDirty; p; p=p->pDirtyNext){
    p->flags &= ~(PGHDR_NEED_SYNC|PGHDR_WRITEABLE);
  }
  pCache->pSynced = pCache->pDirtyTail;
}

/*
................................................................................
/*
** Change the page number of page p to newPgno. 
*/
SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){
  PCache *pCache = p->pCache;
  assert( p->nRef>0 );
  assert( newPgno>0 );
  sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno);
  p->pgno = newPgno;
  if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){
    pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
  }
}

................................................................................
** function is 0, then the data area associated with page 1 is zeroed, but
** the page object is not dropped.
*/
SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){
  if( pCache->pCache ){
    PgHdr *p;
    PgHdr *pNext;

    for(p=pCache->pDirty; p; p=pNext){
      pNext = p->pDirtyNext;
      /* This routine never gets called with a positive pgno except right
      ** after sqlite3PcacheCleanAll().  So if there are dirty pages,
      ** it must be that pgno==0.
      */
      assert( p->pgno>0 );
................................................................................
}

/*
** Close a cache.
*/
SQLITE_PRIVATE void sqlite3PcacheClose(PCache *pCache){
  assert( pCache->pCache!=0 );

  sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
}

/* 
** Discard the contents of the cache.
*/
SQLITE_PRIVATE void sqlite3PcacheClear(PCache *pCache){
................................................................................
  }
  return rc;
}

static int pager_truncate(Pager *pPager, Pgno nPage);

/*
** The write transaction open on the pager passed as the only argument is
** being committed. This function returns true if all dirty pages should
** be flushed to disk, or false otherwise. Pages should be flushed to disk
** unless one of the following is true:
**
**   * The db is an in-memory database.
**
**   * The db is a temporary database and the db file has not been opened.
**
**   * The db is a temporary database and the cache contains less than
**     C/4 dirty pages, where C is the configured cache-size.
*/
static int pagerFlushOnCommit(Pager *pPager){
  if( pPager->tempFile==0 ) return 1;
  if( !isOpen(pPager->fd) ) return 0;
  return (sqlite3PCachePercentDirty(pPager->pPCache)>=25);
}

/*
** This routine ends a transaction. A transaction is usually ended by 
** either a COMMIT or a ROLLBACK operation. This routine may be called 
................................................................................
          rc = sqlite3OsSync(pPager->jfd, pPager->syncFlags);
        }
      }
      pPager->journalOff = 0;
    }else if( pPager->journalMode==PAGER_JOURNALMODE_PERSIST
      || (pPager->exclusiveMode && pPager->journalMode!=PAGER_JOURNALMODE_WAL)
    ){
      rc = zeroJournalHdr(pPager, hasMaster);
      pPager->journalOff = 0;
    }else{
      /* This branch may be executed with Pager.journalMode==MEMORY if
      ** a hot-journal was just rolled back. In this case the journal
      ** file should be closed and deleted. If this connection writes to
      ** the database file, it will do so using an in-memory journal.
      */
................................................................................
    }
  }
#endif

  sqlite3BitvecDestroy(pPager->pInJournal);
  pPager->pInJournal = 0;
  pPager->nRec = 0;

  if( MEMDB || pagerFlushOnCommit(pPager) ){
    sqlite3PcacheCleanAll(pPager->pPCache);
  }else{
    sqlite3PcacheClearWritable(pPager->pPCache);
  }
  sqlite3PcacheTruncate(pPager->pPCache, pPager->dbSize);

  if( pagerUseWal(pPager) ){
    /* Drop the WAL write-lock, if any. Also, if the connection was in 
    ** locking_mode=exclusive mode but is no longer, drop the EXCLUSIVE 
    ** lock held on the database file.
    */
    rc2 = sqlite3WalEndWriteTransaction(pPager->pWal);
................................................................................
    assert( isSavepnt );
    assert( (pPager->doNotSpill & SPILLFLAG_ROLLBACK)==0 );
    pPager->doNotSpill |= SPILLFLAG_ROLLBACK;
    rc = sqlite3PagerGet(pPager, pgno, &pPg, 1);
    assert( (pPager->doNotSpill & SPILLFLAG_ROLLBACK)!=0 );
    pPager->doNotSpill &= ~SPILLFLAG_ROLLBACK;
    if( rc!=SQLITE_OK ) return rc;
    pPg->flags &= ~PGHDR_NEED_READ;
    sqlite3PcacheMakeDirty(pPg);
  }
  if( pPg ){
    /* No page should ever be explicitly rolled back that is in use, except
    ** for page 1 which is held in use in order to keep the lock on the
    ** database active. However such a page may be rolled back as a result
    ** of an internal error resulting in an automatic call to
    ** sqlite3PagerRollback().
    */
    void *pData;
    pData = pPg->pData;
    memcpy(pData, (u8*)aData, pPager->pageSize);
    pPager->xReiniter(pPg);
    if( isMainJrnl && (!isSavepnt || *pOffset<=pPager->journalHdr) ){
      /* If the contents of this page were just restored from the main 
      ** journal file, then its content must be as they were when the 
      ** transaction was first opened. In this case we can mark the page
      ** as clean, since there will be no need to write it out to the
      ** database.
      **
      ** There is one exception to this rule. If the page is being rolled
      ** back as part of a savepoint (or statement) rollback from an 
      ** unsynced portion of the main journal file, then it is not safe
      ** to mark the page as clean. This is because marking the page as
      ** clean will clear the PGHDR_NEED_SYNC flag. Since the page is
      ** already in the journal file (recorded in Pager.pInJournal) and
      ** the PGHDR_NEED_SYNC flag is cleared, if the page is written to
      ** again within this transaction, it will be marked as dirty but
      ** the PGHDR_NEED_SYNC flag will not be set. It could then potentially
      ** be written out into the database file before its journal file
      ** segment is synced. If a crash occurs during or following this,
      ** database corruption may ensue.
      **
      ** Update: Another exception is for temp files that are not 
      ** in-memory databases. In this case the page may have been dirty
      ** at the start of the transaction.
      */
      assert( !pagerUseWal(pPager) );
      if( pPager->tempFile==0 ) sqlite3PcacheMakeClean(pPg);
    }

    pager_set_pagehash(pPg);

    /* If this was page 1, then restore the value of Pager.dbFileVers.
    ** Do this before any decoding. */
    if( pgno==1 ){
      memcpy(&pPager->dbFileVers, &((u8*)pData)[24],sizeof(pPager->dbFileVers));
    }
................................................................................
SQLITE_PRIVATE void sqlite3PagerDontWrite(PgHdr *pPg){
  Pager *pPager = pPg->pPager;
  if( !pPager->tempFile && (pPg->flags&PGHDR_DIRTY) && pPager->nSavepoint==0 ){
    PAGERTRACE(("DONT_WRITE page %d of %d\n", pPg->pgno, PAGERID(pPager)));
    IOTRACE(("CLEAN %p %d\n", pPager, pPg->pgno))
    pPg->flags |= PGHDR_DONT_WRITE;
    pPg->flags &= ~PGHDR_WRITEABLE;

    pager_set_pagehash(pPg);
  }
}

/*
** This routine is called to increment the value of the database file 
** change-counter, stored as a 4-byte big-endian integer starting at 
................................................................................
  );
  assert( assert_pager_state(pPager) );

  /* If a prior error occurred, report that error again. */
  if( NEVER(pPager->errCode) ) return pPager->errCode;

  /* Provide the ability to easily simulate an I/O error during testing */
  if( (rc = sqlite3FaultSim(400))!=SQLITE_OK ) return rc;

  PAGERTRACE(("DATABASE SYNC: File=%s zMaster=%s nSize=%d\n", 
      pPager->zFilename, zMaster, pPager->dbSize));

  /* If no database changes have been made, return early. */
  if( pPager->eState<PAGER_WRITER_CACHEMOD ) return SQLITE_OK;

  assert( MEMDB==0 || pPager->tempFile );
  assert( isOpen(pPager->fd) || pPager->tempFile );
  if( 0==pagerFlushOnCommit(pPager) ){
    /* If this is an in-memory db, or no pages have been written to, or this
    ** function has already been called, it is mostly a no-op.  However, any
    ** backup in progress needs to be restarted.  */
    sqlite3BackupRestart(pPager->pBackup);
  }else{
    if( pagerUseWal(pPager) ){
      PgHdr *pList = sqlite3PcacheDirtyList(pPager->pPCache);
................................................................................
       || pPager->eState==PAGER_WRITER_DBMOD
  );
  assert( assert_pager_state(pPager) );

  /* In order to be able to rollback, an in-memory database must journal
  ** the page we are moving from.
  */

  if( pPager->tempFile ){
    rc = sqlite3PagerWrite(pPg);
    if( rc ) return rc;
  }

  /* If the page being moved is dirty and has not been saved by the latest
  ** savepoint, then save the current contents of the page into the 
................................................................................
  sqlite3PcacheMove(pPg, pgno);
  sqlite3PcacheMakeDirty(pPg);

  /* For an in-memory database, make sure the original page continues
  ** to exist, in case the transaction needs to roll back.  Use pPgOld
  ** as the original page since it has already been allocated.
  */
  if( pPager->tempFile ){
    assert( pPgOld );
    sqlite3PcacheMove(pPgOld, origPgno);
    sqlite3PagerUnrefNotNull(pPgOld);
  }

  if( needSyncPgno ){
    /* If needSyncPgno is non-zero, then the journal file needs to be 
    ** sync()ed before any data is written to database file page needSyncPgno.
................................................................................
  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
  pPage->leaf = (u8)(flagByte>>3);  assert( PTF_LEAF == 1<<3 );
  flagByte &= ~PTF_LEAF;
  pPage->childPtrSize = 4-4*pPage->leaf;
  pPage->xCellSize = cellSizePtr;
  pBt = pPage->pBt;
  if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){
    /* EVIDENCE-OF: R-03640-13415 A value of 5 means the page is an interior
    ** table b-tree page. */
    assert( (PTF_LEAFDATA|PTF_INTKEY)==5 );
    /* EVIDENCE-OF: R-20501-61796 A value of 13 means the page is a leaf
    ** table b-tree page. */
    assert( (PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF)==13 );
    pPage->intKey = 1;
    if( pPage->leaf ){
      pPage->intKeyLeaf = 1;
      pPage->xParseCell = btreeParseCellPtr;
    }else{
      pPage->intKeyLeaf = 0;
      pPage->xCellSize = cellSizePtrNoPayload;
      pPage->xParseCell = btreeParseCellPtrNoPayload;
    }
    pPage->maxLocal = pBt->maxLeaf;
    pPage->minLocal = pBt->minLeaf;
  }else if( flagByte==PTF_ZERODATA ){
    /* EVIDENCE-OF: R-27225-53936 A value of 2 means the page is an interior
    ** index b-tree page. */
    assert( (PTF_ZERODATA)==2 );
    /* EVIDENCE-OF: R-16571-11615 A value of 10 means the page is a leaf
    ** index b-tree page. */
    assert( (PTF_ZERODATA|PTF_LEAF)==10 );
    pPage->intKey = 0;
    pPage->intKeyLeaf = 0;
    pPage->xParseCell = btreeParseCellPtrIndex;
    pPage->maxLocal = pBt->maxLocal;
    pPage->minLocal = pBt->minLocal;
  }else{
................................................................................
static void fts5SourceIdFunc(
  sqlite3_context *pCtx,          /* Function call context */
  int nArg,                       /* Number of args */
  sqlite3_value **apUnused        /* Function arguments */
){
  assert( nArg==0 );
  UNUSED_PARAM2(nArg, apUnused);
  sqlite3_result_text(pCtx, "fts5: 2016-05-09 19:03:42 14e53d0e2f62d82ae1d64a72fd9711548e3bf5ea", -1, SQLITE_TRANSIENT);
}

static int fts5Init(sqlite3 *db){
  static const sqlite3_module fts5Mod = {
    /* iVersion      */ 2,
    /* xCreate       */ fts5CreateMethod,
    /* xConnect      */ fts5ConnectMethod,
**
** See also: [sqlite3_libversion()],
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
#define SQLITE_VERSION        "3.13.0"
#define SQLITE_VERSION_NUMBER 3013000
#define SQLITE_SOURCE_ID      "2016-05-18 10:57:30 fc49f556e48970561d7ab6a2f24fdd7d9eb81ff2"

/*
** CAPI3REF: Run-Time Library Version Numbers
** KEYWORDS: sqlite3_version, sqlite3_sourceid
**
** These interfaces provide the same information as the [SQLITE_VERSION],
** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros
................................................................................
**
** In other words, ALWAYS and NEVER are added for defensive code.
**
** When doing coverage testing ALWAYS and NEVER are hard-coded to
** be true and false so that the unreachable code they specify will
** not be counted as untested code.
*/
#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_MUTATION_TEST)
# define ALWAYS(X)      (1)
# define NEVER(X)       (0)
#elif !defined(NDEBUG)
# define ALWAYS(X)      ((X)?1:(assert(0),0))
# define NEVER(X)       ((X)?(assert(0),1):0)
#else
# define ALWAYS(X)      (X)
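A minimal sketch of how these macros read at a call site (illustrative
code, not part of the amalgamation): the guarded branch is believed
unreachable, so it asserts in debug builds, is hard-coded away in coverage
builds, and reduces to a plain test otherwise.

  static int derefOrZero(int *p){
    /* p is believed to be non-NULL on every path that reaches here.
    ** NEVER() records that belief, asserts it when NDEBUG is not defined,
    ** and becomes constant 0 under SQLITE_COVERAGE_TEST so the early
    ** return is not counted as untested code. */
    if( NEVER(p==0) ) return 0;
    return *p;
  }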
................................................................................
** Every page in the cache is controlled by an instance of the following
** structure.
*/
struct PgHdr {
  sqlite3_pcache_page *pPage;    /* Pcache object page handle */
  void *pData;                   /* Page data */
  void *pExtra;                  /* Extra content */
  PgHdr *pDirty;                 /* Transient list of dirty sorted by pgno */
  Pager *pPager;                 /* The pager this page is part of */
  Pgno pgno;                     /* Page number for this page */
#ifdef SQLITE_CHECK_PAGES
  u32 pageHash;                  /* Hash of page content */
#endif
  u16 flags;                     /* PGHDR flags defined below */

................................................................................

/* Bit values for PgHdr.flags */
#define PGHDR_CLEAN           0x001  /* Page not on the PCache.pDirty list */
#define PGHDR_DIRTY           0x002  /* Page is on the PCache.pDirty list */
#define PGHDR_WRITEABLE       0x004  /* Journaled and ready to modify */
#define PGHDR_NEED_SYNC       0x008  /* Fsync the rollback journal before
                                     ** writing this page to the database */

#define PGHDR_DONT_WRITE      0x010  /* Do not write content to disk */
#define PGHDR_MMAP            0x020  /* This is an mmap page object */

#define PGHDR_WAL_APPEND      0x040  /* Appended to wal file */
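Because each flag is a distinct bit, state tests and transitions are plain
bitwise operations. A hypothetical helper, mirroring the XOR trick used by
sqlite3PcacheMakeDirty() further down: CLEAN and DIRTY are mutually
exclusive, so XOR-ing both bits flips a page from one state to the other.

  static void pghdrFlipCleanDirty(PgHdr *pPg){
    assert( (pPg->flags & (PGHDR_CLEAN|PGHDR_DIRTY))==PGHDR_CLEAN
         || (pPg->flags & (PGHDR_CLEAN|PGHDR_DIRTY))==PGHDR_DIRTY );
    pPg->flags ^= (PGHDR_CLEAN|PGHDR_DIRTY);  /* swap the exclusive bits */
  }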

/* Initialize and shutdown the page cache subsystem */
SQLITE_PRIVATE int sqlite3PcacheInitialize(void);
SQLITE_PRIVATE void sqlite3PcacheShutdown(void);

/* Page cache buffer management:
** These routines implement SQLITE_CONFIG_PAGECACHE.
................................................................................
#if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG)
/* Iterate through all dirty pages currently stored in the cache. This
** interface is only available if SQLITE_CHECK_PAGES is defined when the 
** library is built.
*/
SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *));
#endif

#if defined(SQLITE_DEBUG)
/* Check invariants on a PgHdr object */
SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr*);
#endif

/* Set and get the suggested cache-size for the specified pager-cache.
**
** If no global maximum is configured, then the system attempts to limit
** the total number of pages cached by purgeable pager-caches to the sum
** of the suggested cache-sizes.
*/
................................................................................
**
*************************************************************************
** This file implements that page cache.
*/
/* #include "sqliteInt.h" */

/*
** A complete page cache is an instance of this structure.  Every
** entry in the cache holds a single page of the database file.  The
** btree layer only operates on the cached copy of the database pages.
**
** A page cache entry is "clean" if it exactly matches what is currently
** on disk.  A page is "dirty" if it has been modified and needs to be
** persisted to disk.
**
** pDirty, pDirtyTail, pSynced:
**   All dirty pages are linked into the doubly linked list using
**   PgHdr.pDirtyNext and pDirtyPrev. The list is maintained in LRU order
**   such that p was added to the list more recently than p->pDirtyNext.
**   PCache.pDirty points to the first (newest) element in the list and
**   pDirtyTail to the last (oldest).
**
**   The PCache.pSynced variable is used to optimize searching for a dirty
**   page to eject from the cache mid-transaction. It is better to eject
**   a page that does not require a journal sync than one that does. 
**   Therefore, pSynced is maintained so that it *almost* always points
**   to either the oldest page in the pDirty/pDirtyTail list that has a
**   clear PGHDR_NEED_SYNC flag or to a page that is older than this one
**   (so that the right page to eject can be found by following pDirtyPrev
**   pointers).
*/
struct PCache {
  PgHdr *pDirty, *pDirtyTail;         /* List of dirty pages in LRU order */
  PgHdr *pSynced;                     /* Last synced page in dirty page list */
  int nRefSum;                        /* Sum of ref counts over all pages */
  int szCache;                        /* Configured cache size */
  int szSpill;                        /* Size before spilling occurs */
................................................................................
  int szExtra;                        /* Size of extra space for each page */
  u8 bPurgeable;                      /* True if pages are on backing store */
  u8 eCreate;                         /* eCreate value for for xFetch() */
  int (*xStress)(void*,PgHdr*);       /* Call to try make a page clean */
  void *pStress;                      /* Argument to xStress */
  sqlite3_pcache *pCache;             /* Pluggable cache module */
};
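A sketch (hypothetical helper, not in the source) of walking the dirty
list described above from most- to least-recently used, the same traversal
that sqlite3PcacheClearWritable() performs below:

  static int pcacheCountDirty(PCache *pCache){
    PgHdr *p;
    int nDirty = 0;
    for(p=pCache->pDirty; p; p=p->pDirtyNext) nDirty++; /* newest to oldest */
    return nDirty;
  }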

/********************************** Test and Debug Logic **********************/
/*
** Debug tracing macros.  Enable by changing the "0" to "1" and
** recompiling.
**
** When sqlite3PcacheTrace is 1, single line trace messages are issued.
** When sqlite3PcacheTrace is 2, a dump of the pcache showing all cache entries
** is displayed for many operations, resulting in a lot of output.
*/
#if defined(SQLITE_DEBUG) && 0
  int sqlite3PcacheTrace = 2;       /* 0: off  1: simple  2: cache dumps */
  int sqlite3PcacheMxDump = 9999;   /* Max cache entries for pcacheDump() */
# define pcacheTrace(X) if(sqlite3PcacheTrace){sqlite3DebugPrintf X;}
  void pcacheDump(PCache *pCache){
    int N;
    int i, j;
    sqlite3_pcache_page *pLower;
    PgHdr *pPg;
    unsigned char *a;
  
    if( sqlite3PcacheTrace<2 ) return;
    if( pCache->pCache==0 ) return;
    N = sqlite3PcachePagecount(pCache);
    if( N>sqlite3PcacheMxDump ) N = sqlite3PcacheMxDump;
    for(i=1; i<=N; i++){
       pLower = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, i, 0);
       if( pLower==0 ) continue;
       pPg = (PgHdr*)pLower->pExtra;
       printf("%3d: nRef %2d flgs %02x data ", i, pPg->nRef, pPg->flags);
       a = (unsigned char *)pLower->pBuf;
       for(j=0; j<12; j++) printf("%02x", a[j]);
       printf("\n");
       if( pPg->pPage==0 ){
         sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, pLower, 0);
       }
    }
  }
#else
# define pcacheTrace(X)
# define pcacheDump(X)
#endif
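To turn the tracing on, the guard at the top of this block is edited in
place and the library recompiled, e.g. (illustrative):

  #if defined(SQLITE_DEBUG) && 1   /* "&& 0" changed to "&& 1" */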

/*
** Check invariants on a PgHdr entry.  Return true if everything is OK.
** Return false if any invariant is violated.
**
** This routine is for use inside of assert() statements only.  For
** example:
**
**          assert( sqlite3PcachePageSanity(pPg) );
*/
#if SQLITE_DEBUG
SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr *pPg){
  PCache *pCache;
  assert( pPg!=0 );
  assert( pPg->pgno>0 );    /* Page number is 1 or more */
  pCache = pPg->pCache;
  assert( pCache!=0 );      /* Every page has an associated PCache */
  if( pPg->flags & PGHDR_CLEAN ){
    assert( (pPg->flags & PGHDR_DIRTY)==0 );/* Cannot be both CLEAN and DIRTY */
    assert( pCache->pDirty!=pPg );          /* CLEAN pages not on dirty list */
    assert( pCache->pDirtyTail!=pPg );
  }
  /* WRITEABLE pages must also be DIRTY */
  if( pPg->flags & PGHDR_WRITEABLE ){
    assert( pPg->flags & PGHDR_DIRTY );     /* WRITEABLE implies DIRTY */
  }
  /* NEED_SYNC can be set independently of WRITEABLE.  This can happen,
  ** for example, when using the sqlite3PagerDontWrite() optimization:
  **    (1)  Page X is journalled, and gets WRITEABLE and NEED_SYNC.
  **    (2)  Page X moved to freelist, WRITEABLE is cleared
  **    (3)  Page X reused, WRITEABLE is set again
  ** If NEED_SYNC had been cleared in step 2, then it would not be reset
  ** in step 3, and page might be written into the database without first
  ** syncing the rollback journal, which might cause corruption on a power
  ** loss.
  **
  ** Another example is when the database page size is smaller than the
  ** disk sector size.  When any page of a sector is journalled, all pages
  ** in that sector are marked NEED_SYNC even if they are still CLEAN, just
  ** in case they are later modified, since all pages in the same sector
  ** must be journalled and synced before any of those pages can be safely
  ** written.
  */
  return 1;
}
#endif /* SQLITE_DEBUG */


/********************************** Linked List Management ********************/

/* Allowed values for second argument to pcacheManageDirtyList() */
#define PCACHE_DIRTYLIST_REMOVE   1    /* Remove pPage from dirty list */
#define PCACHE_DIRTYLIST_ADD      2    /* Add pPage to the dirty list */
#define PCACHE_DIRTYLIST_FRONT    3    /* Move pPage to the front of the list */
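The opcodes are bit-encoded: FRONT is literally REMOVE|ADD, so a single
addRemove parameter drives all three behaviors, as the comment past the
fold explains. A hypothetical compile-time check of that encoding:

  typedef char assertFrontIsRemovePlusAdd[
      (PCACHE_DIRTYLIST_REMOVE|PCACHE_DIRTYLIST_ADD)==PCACHE_DIRTYLIST_FRONT
      ? 1 : -1];  /* fails to compile if the encoding ever changes */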
................................................................................
** argument determines what operation to do.  The 0x01 bit means first
** remove pPage from the dirty list.  The 0x02 means add pPage back to
** the dirty list.  Doing both moves pPage to the front of the dirty list.
*/
static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){
  PCache *p = pPage->pCache;

  pcacheTrace(("%p.DIRTYLIST.%s %d\n", p,
                addRemove==1 ? "REMOVE" : addRemove==2 ? "ADD" : "FRONT",
                pPage->pgno));
  if( addRemove & PCACHE_DIRTYLIST_REMOVE ){
    assert( pPage->pDirtyNext || pPage==p->pDirtyTail );
    assert( pPage->pDirtyPrev || pPage==p->pDirty );
  
    /* Update the PCache1.pSynced variable if necessary. */
    if( p->pSynced==pPage ){
      p->pSynced = pPage->pDirtyPrev;
    }
  
    if( pPage->pDirtyNext ){
      pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev;
    }else{
      assert( pPage==p->pDirtyTail );
      p->pDirtyTail = pPage->pDirtyPrev;
    }
    if( pPage->pDirtyPrev ){
      pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext;
    }else{
      /* If there are now no dirty pages in the cache, set eCreate to 2. 
      ** This is an optimization that allows sqlite3PcacheFetch() to skip
      ** searching for a dirty page to eject from the cache when it might
      ** otherwise have to.  */
      assert( pPage==p->pDirty );
      p->pDirty = pPage->pDirtyNext;
      assert( p->bPurgeable || p->eCreate==2 );

      if( p->pDirty==0 ){         /*OPTIMIZATION-IF-TRUE*/
        assert( p->bPurgeable==0 || p->eCreate==1 );
        p->eCreate = 2;
      }
    }
    pPage->pDirtyNext = 0;
    pPage->pDirtyPrev = 0;
  }
  if( addRemove & PCACHE_DIRTYLIST_ADD ){
................................................................................
      p->pDirtyTail = pPage;
      if( p->bPurgeable ){
        assert( p->eCreate==2 );
        p->eCreate = 1;
      }
    }
    p->pDirty = pPage;

    /* If pSynced is NULL and this page has a clear NEED_SYNC flag, set
    ** pSynced to point to it. Checking the NEED_SYNC flag is an 
    ** optimization, as if pSynced points to a page with the NEED_SYNC
    ** flag set sqlite3PcacheFetchStress() searches through all newer 
    ** entries of the dirty-list for a page with NEED_SYNC clear anyway.  */
    if( !p->pSynced 
     && 0==(pPage->flags&PGHDR_NEED_SYNC)   /*OPTIMIZATION-IF-FALSE*/
    ){
      p->pSynced = pPage;
    }
  }
  pcacheDump(p);
}

/*
** Wrapper around the pluggable caches xUnpin method. If the cache is
** being used for an in-memory database, this function is a no-op.
*/
static void pcacheUnpin(PgHdr *p){
  if( p->pCache->bPurgeable ){
    pcacheTrace(("%p.UNPIN %d\n", p->pCache, p->pgno));
    sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 0);
    pcacheDump(p->pCache);
  }
}

/*
** Compute the number of pages of cache requested.   p->szCache is the
** cache size requested by the "PRAGMA cache_size" statement.
*/
................................................................................
  p->szExtra = szExtra;
  p->bPurgeable = bPurgeable;
  p->eCreate = 2;
  p->xStress = xStress;
  p->pStress = pStress;
  p->szCache = 100;
  p->szSpill = 1;
  pcacheTrace(("%p.OPEN szPage %d bPurgeable %d\n",p,szPage,bPurgeable));
  return sqlite3PcacheSetPageSize(p, szPage);
}

/*
** Change the page size for PCache object. The caller must ensure that there
** are no outstanding page references when this function is called.
*/
................................................................................
    if( pNew==0 ) return SQLITE_NOMEM_BKPT;
    sqlite3GlobalConfig.pcache2.xCachesize(pNew, numberOfCachePages(pCache));
    if( pCache->pCache ){
      sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
    }
    pCache->pCache = pNew;
    pCache->szPage = szPage;
    pcacheTrace(("%p.PAGESIZE %d\n",pCache,szPage));
  }
  return SQLITE_OK;
}

/*
** Try to obtain a page from the cache.
**
................................................................................
*/
SQLITE_PRIVATE sqlite3_pcache_page *sqlite3PcacheFetch(
  PCache *pCache,       /* Obtain the page from this cache */
  Pgno pgno,            /* Page number to obtain */
  int createFlag        /* If true, create page if it does not exist already */
){
  int eCreate;
  sqlite3_pcache_page *pRes;

  assert( pCache!=0 );
  assert( pCache->pCache!=0 );
  assert( createFlag==3 || createFlag==0 );
  assert( pgno>0 );
  assert( pCache->eCreate==((pCache->bPurgeable && pCache->pDirty) ? 1 : 2) );

  /* eCreate defines what to do if the page does not exist.
  **    0     Do not allocate a new page.  (createFlag==0)
  **    1     Allocate a new page if doing so is inexpensive.
  **          (createFlag==1 AND bPurgeable AND pDirty)
  **    2     Allocate a new page even if doing so is difficult.
  **          (createFlag==1 AND !(bPurgeable AND pDirty))
  */
  eCreate = createFlag & pCache->eCreate;
  assert( eCreate==0 || eCreate==1 || eCreate==2 );
  assert( createFlag==0 || pCache->eCreate==eCreate );
  assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) );
  pRes = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate);
  pcacheTrace(("%p.FETCH %d%s (result: %p)\n",pCache,pgno,
               createFlag?" create":"",pRes));
  return pRes;
}
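Worked through the only values that can occur (createFlag is asserted to
be 0 or 3, and pCache->eCreate is kept at 1 or 2), the bitwise AND
reproduces exactly the three documented cases:

  createFlag==0              ->  eCreate = 0&1 = 0&2 = 0  (never allocate)
  createFlag==3, eCreate==1  ->  eCreate = 3&1 = 1        (allocate if cheap)
  createFlag==3, eCreate==2  ->  eCreate = 3&2 = 2        (allocate regardless)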

/*
** If the sqlite3PcacheFetch() routine is unable to allocate a new
** page because no clean pages are available for reuse and the cache
** size limit has been reached, then this routine can be invoked to 
** try harder to allocate a page.  This routine might invoke the stress
................................................................................
  if( pCache->eCreate==2 ) return 0;

  if( sqlite3PcachePagecount(pCache)>pCache->szSpill ){
    /* Find a dirty page to write-out and recycle. First try to find a 
    ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC
    ** cleared), but if that is not possible settle for any other 
    ** unreferenced dirty page.
    **
    ** If the LRU page in the dirty list that has a clear PGHDR_NEED_SYNC
    ** flag is currently referenced, then the following may leave pSynced
    ** set incorrectly (pointing to other than the LRU page with NEED_SYNC
    ** cleared). This is Ok, as pSynced is just an optimization.  */
    for(pPg=pCache->pSynced; 
        pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC)); 
        pPg=pPg->pDirtyPrev
    );
    pCache->pSynced = pPg;
    if( !pPg ){
      for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pDirtyPrev);
................................................................................
#ifdef SQLITE_LOG_CACHE_SPILL
      sqlite3_log(SQLITE_FULL, 
                  "spill page %d making room for %d - cache used: %d/%d",
                  pPg->pgno, pgno,
                  sqlite3GlobalConfig.pcache.xPagecount(pCache->pCache),
                numberOfCachePages(pCache));
#endif
      pcacheTrace(("%p.SPILL %d\n",pCache,pPg->pgno));
      rc = pCache->xStress(pCache->pStress, pPg);
      pcacheDump(pCache);
      if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){
        return rc;
      }
    }
  }
  *ppPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, 2);
  return *ppPage==0 ? SQLITE_NOMEM_BKPT : SQLITE_OK;
................................................................................
  pPgHdr = (PgHdr *)pPage->pExtra;

  if( !pPgHdr->pPage ){
    return pcacheFetchFinishWithInit(pCache, pgno, pPage);
  }
  pCache->nRefSum++;
  pPgHdr->nRef++;
  assert( sqlite3PcachePageSanity(pPgHdr) );
  return pPgHdr;
}

/*
** Decrement the reference count on a page. If the page is clean and the
** reference count drops to 0, then it is made eligible for recycling.
*/
SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p){
  assert( p->nRef>0 );
  p->pCache->nRefSum--;
  if( (--p->nRef)==0 ){
    if( p->flags&PGHDR_CLEAN ){
      pcacheUnpin(p);
    }else if( p->pDirtyPrev!=0 ){ /*OPTIMIZATION-IF-FALSE*/
      /* Move the page to the head of the dirty list. If p->pDirtyPrev==0,
      ** then page p is already at the head of the dirty list and the
      ** following call would be a no-op. Hence the OPTIMIZATION-IF-FALSE
      ** tag above.  */
      pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
    }
  }
}
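Taken together with sqlite3PcacheFetch() and sqlite3PcacheFetchFinish()
above, this gives the page lifecycle. A sketch of the calling pattern
(hypothetical caller, error handling elided):

  sqlite3_pcache_page *pBase = sqlite3PcacheFetch(pCache, pgno, 3);
  if( pBase ){
    PgHdr *pPg = sqlite3PcacheFetchFinish(pCache, pgno, pBase);
    /* ... use the pinned page; call sqlite3PcacheMakeDirty(pPg) before
    ** modifying its data ... */
    sqlite3PcacheRelease(pPg);  /* unpins the page if it is clean */
  }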

/*
** Increase the reference count of a supplied page by 1.
*/
SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr *p){
  assert(p->nRef>0);
  assert( sqlite3PcachePageSanity(p) );
  p->nRef++;
  p->pCache->nRefSum++;
}

/*
** Drop a page from the cache. There must be exactly one reference to the
** page. This function deletes that reference, so after it returns the
** page pointed to by p is invalid.
*/
SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr *p){
  assert( p->nRef==1 );
  assert( sqlite3PcachePageSanity(p) );
  if( p->flags&PGHDR_DIRTY ){
    pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
  }
  p->pCache->nRefSum--;
  sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 1);
}

/*
** Make sure the page is marked as dirty. If it isn't dirty already,
** make it so.
*/
SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr *p){
  assert( p->nRef>0 );
  assert( sqlite3PcachePageSanity(p) );
  if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){    /*OPTIMIZATION-IF-FALSE*/
    p->flags &= ~PGHDR_DONT_WRITE;
    if( p->flags & PGHDR_CLEAN ){
      p->flags ^= (PGHDR_DIRTY|PGHDR_CLEAN);
      pcacheTrace(("%p.DIRTY %d\n",p->pCache,p->pgno));
      assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY );
      pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD);
    }
    assert( sqlite3PcachePageSanity(p) );
  }
}

/*
** Make sure the page is marked as clean. If it isn't clean already,
** make it so.
*/
SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr *p){
  assert( sqlite3PcachePageSanity(p) );
  if( ALWAYS((p->flags & PGHDR_DIRTY)!=0) ){
    assert( (p->flags & PGHDR_CLEAN)==0 );
    pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
    p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC|PGHDR_WRITEABLE);
    p->flags |= PGHDR_CLEAN;
    pcacheTrace(("%p.CLEAN %d\n",p->pCache,p->pgno));
    assert( sqlite3PcachePageSanity(p) );
    if( p->nRef==0 ){
      pcacheUnpin(p);
    }
  }
}

/*
** Make every page in the cache clean.
*/
SQLITE_PRIVATE void sqlite3PcacheCleanAll(PCache *pCache){
  PgHdr *p;
  pcacheTrace(("%p.CLEAN-ALL\n",pCache));
  while( (p = pCache->pDirty)!=0 ){
    sqlite3PcacheMakeClean(p);
  }
}

/*
** Clear the PGHDR_NEED_SYNC and PGHDR_WRITEABLE flag from all dirty pages.
*/
SQLITE_PRIVATE void sqlite3PcacheClearWritable(PCache *pCache){
  PgHdr *p;
  pcacheTrace(("%p.CLEAR-WRITEABLE\n",pCache));
  for(p=pCache->pDirty; p; p=p->pDirtyNext){
    p->flags &= ~(PGHDR_NEED_SYNC|PGHDR_WRITEABLE);
  }
  pCache->pSynced = pCache->pDirtyTail;
}

/*
................................................................................
/*
** Change the page number of page p to newPgno. 
*/
SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){
  PCache *pCache = p->pCache;
  assert( p->nRef>0 );
  assert( newPgno>0 );
  assert( sqlite3PcachePageSanity(p) );
  pcacheTrace(("%p.MOVE %d -> %d\n",pCache,p->pgno,newPgno));
  sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno);
  p->pgno = newPgno;
  if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){
    pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
  }
}

................................................................................
** function is 0, then the data area associated with page 1 is zeroed, but
** the page object is not dropped.
*/
SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){
  if( pCache->pCache ){
    PgHdr *p;
    PgHdr *pNext;
    pcacheTrace(("%p.TRUNCATE %d\n",pCache,pgno));
    for(p=pCache->pDirty; p; p=pNext){
      pNext = p->pDirtyNext;
      /* This routine never gets called with a positive pgno except right
      ** after sqlite3PcacheCleanAll().  So if there are dirty pages,
      ** it must be that pgno==0.
      */
      assert( p->pgno>0 );
................................................................................
}

/*
** Close a cache.
*/
SQLITE_PRIVATE void sqlite3PcacheClose(PCache *pCache){
  assert( pCache->pCache!=0 );
  pcacheTrace(("%p.CLOSE\n",pCache));
  sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
}

/* 
** Discard the contents of the cache.
*/
SQLITE_PRIVATE void sqlite3PcacheClear(PCache *pCache){
................................................................................
  }
  return rc;
}

static int pager_truncate(Pager *pPager, Pgno nPage);

/*
** The write transaction open on pPager is being committed (bCommit==1)
** or rolled back (bCommit==0).
**
** Return TRUE if and only if all dirty pages should be flushed to disk.
**
** Rules:
**
**   *  For non-TEMP databases, always sync to disk.  This is necessary
**      for transactions to be durable.
**
**   *  Sync TEMP database only on a COMMIT (not a ROLLBACK) when the backing
**      file has been created already (via a spill on pagerStress()) and
**      when the number of dirty pages in memory exceeds 25% of the total
**      cache size.
*/
static int pagerFlushOnCommit(Pager *pPager, int bCommit){
  if( pPager->tempFile==0 ) return 1;
  if( !bCommit ) return 0;
  if( !isOpen(pPager->fd) ) return 0;
  return (sqlite3PCachePercentDirty(pPager->pPCache)>=25);
}
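A concrete reading of the 25% rule (illustrative numbers, assuming the
default suggested cache size of 100 pages set in sqlite3PcacheOpen()
above): a TEMP database COMMIT flushes only once sqlite3PCachePercentDirty()
reports 25 or more, and a ROLLBACK (bCommit==0) never flushes.

  nDirty = 25 of 100 cacheable pages  ->  25*100/100 = 25  ->  flush
  nDirty = 10 of 100 cacheable pages  ->  10*100/100 = 10  ->  keep in memory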

/*
** This routine ends a transaction. A transaction is usually ended by 
** either a COMMIT or a ROLLBACK operation. This routine may be called 
................................................................................
          rc = sqlite3OsSync(pPager->jfd, pPager->syncFlags);
        }
      }
      pPager->journalOff = 0;
    }else if( pPager->journalMode==PAGER_JOURNALMODE_PERSIST
      || (pPager->exclusiveMode && pPager->journalMode!=PAGER_JOURNALMODE_WAL)
    ){
      rc = zeroJournalHdr(pPager, hasMaster||pPager->tempFile);
      pPager->journalOff = 0;
    }else{
      /* This branch may be executed with Pager.journalMode==MEMORY if
      ** a hot-journal was just rolled back. In this case the journal
      ** file should be closed and deleted. If this connection writes to
      ** the database file, it will do so using an in-memory journal.
      */
................................................................................
    }
  }
#endif

  sqlite3BitvecDestroy(pPager->pInJournal);
  pPager->pInJournal = 0;
  pPager->nRec = 0;
  if( rc==SQLITE_OK ){
    if( pagerFlushOnCommit(pPager, bCommit) ){
      sqlite3PcacheCleanAll(pPager->pPCache);
    }else{
      sqlite3PcacheClearWritable(pPager->pPCache);
    }
    sqlite3PcacheTruncate(pPager->pPCache, pPager->dbSize);
  }

  if( pagerUseWal(pPager) ){
    /* Drop the WAL write-lock, if any. Also, if the connection was in 
    ** locking_mode=exclusive mode but is no longer, drop the EXCLUSIVE 
    ** lock held on the database file.
    */
    rc2 = sqlite3WalEndWriteTransaction(pPager->pWal);
................................................................................
    assert( isSavepnt );
    assert( (pPager->doNotSpill & SPILLFLAG_ROLLBACK)==0 );
    pPager->doNotSpill |= SPILLFLAG_ROLLBACK;
    rc = sqlite3PagerGet(pPager, pgno, &pPg, 1);
    assert( (pPager->doNotSpill & SPILLFLAG_ROLLBACK)!=0 );
    pPager->doNotSpill &= ~SPILLFLAG_ROLLBACK;
    if( rc!=SQLITE_OK ) return rc;
    sqlite3PcacheMakeDirty(pPg);
  }
  if( pPg ){
    /* No page should ever be explicitly rolled back that is in use, except
    ** for page 1 which is held in use in order to keep the lock on the
    ** database active. However such a page may be rolled back as a result
    ** of an internal error resulting in an automatic call to
    ** sqlite3PagerRollback().
    */
    void *pData;
    pData = pPg->pData;
    memcpy(pData, (u8*)aData, pPager->pageSize);
    pPager->xReiniter(pPg);

    /* It used to be that sqlite3PcacheMakeClean(pPg) was called here.  But
    ** that call was dangerous and had no detectable benefit since the cache
    ** is normally cleaned by sqlite3PcacheCleanAll() after rollback and so
    ** has been removed. */
    pager_set_pagehash(pPg);

    /* If this was page 1, then restore the value of Pager.dbFileVers.
    ** Do this before any decoding. */
    if( pgno==1 ){
      memcpy(&pPager->dbFileVers, &((u8*)pData)[24],sizeof(pPager->dbFileVers));
    }
................................................................................
SQLITE_PRIVATE void sqlite3PagerDontWrite(PgHdr *pPg){
  Pager *pPager = pPg->pPager;
  if( !pPager->tempFile && (pPg->flags&PGHDR_DIRTY) && pPager->nSavepoint==0 ){
    PAGERTRACE(("DONT_WRITE page %d of %d\n", pPg->pgno, PAGERID(pPager)));
    IOTRACE(("CLEAN %p %d\n", pPager, pPg->pgno))
    pPg->flags |= PGHDR_DONT_WRITE;
    pPg->flags &= ~PGHDR_WRITEABLE;
    testcase( pPg->flags & PGHDR_NEED_SYNC );
    pager_set_pagehash(pPg);
  }
}

/*
** This routine is called to increment the value of the database file 
** change-counter, stored as a 4-byte big-endian integer starting at 
................................................................................
  );
  assert( assert_pager_state(pPager) );

  /* If a prior error occurred, report that error again. */
  if( NEVER(pPager->errCode) ) return pPager->errCode;

  /* Provide the ability to easily simulate an I/O error during testing */
  if( sqlite3FaultSim(400) ) return SQLITE_IOERR;

  PAGERTRACE(("DATABASE SYNC: File=%s zMaster=%s nSize=%d\n", 
      pPager->zFilename, zMaster, pPager->dbSize));

  /* If no database changes have been made, return early. */
  if( pPager->eState<PAGER_WRITER_CACHEMOD ) return SQLITE_OK;

  assert( MEMDB==0 || pPager->tempFile );
  assert( isOpen(pPager->fd) || pPager->tempFile );
  if( 0==pagerFlushOnCommit(pPager, 1) ){
    /* If this is an in-memory db, or no pages have been written to, or this
    ** function has already been called, it is mostly a no-op.  However, any
    ** backup in progress needs to be restarted.  */
    sqlite3BackupRestart(pPager->pBackup);
  }else{
    if( pagerUseWal(pPager) ){
      PgHdr *pList = sqlite3PcacheDirtyList(pPager->pPCache);
................................................................................
       || pPager->eState==PAGER_WRITER_DBMOD
  );
  assert( assert_pager_state(pPager) );

  /* In order to be able to rollback, an in-memory database must journal
  ** the page we are moving from.
  */
  assert( pPager->tempFile || !MEMDB );
  if( pPager->tempFile ){
    rc = sqlite3PagerWrite(pPg);
    if( rc ) return rc;
  }

  /* If the page being moved is dirty and has not been saved by the latest
  ** savepoint, then save the current contents of the page into the 
................................................................................
  sqlite3PcacheMove(pPg, pgno);
  sqlite3PcacheMakeDirty(pPg);

  /* For an in-memory database, make sure the original page continues
  ** to exist, in case the transaction needs to roll back.  Use pPgOld
  ** as the original page since it has already been allocated.
  */
  if( pPager->tempFile && pPgOld ){

    sqlite3PcacheMove(pPgOld, origPgno);
    sqlite3PagerUnrefNotNull(pPgOld);
  }

  if( needSyncPgno ){
    /* If needSyncPgno is non-zero, then the journal file needs to be 
    ** sync()ed before any data is written to database file page needSyncPgno.
................................................................................
  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
  pPage->leaf = (u8)(flagByte>>3);  assert( PTF_LEAF == 1<<3 );
  flagByte &= ~PTF_LEAF;
  pPage->childPtrSize = 4-4*pPage->leaf;
  pPage->xCellSize = cellSizePtr;
  pBt = pPage->pBt;
  if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){
    /* EVIDENCE-OF: R-07291-35328 A value of 5 (0x05) means the page is an
    ** interior table b-tree page. */
    assert( (PTF_LEAFDATA|PTF_INTKEY)==5 );
    /* EVIDENCE-OF: R-26900-09176 A value of 13 (0x0d) means the page is a
    ** leaf table b-tree page. */
    assert( (PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF)==13 );
    pPage->intKey = 1;
    if( pPage->leaf ){
      pPage->intKeyLeaf = 1;
      pPage->xParseCell = btreeParseCellPtr;
    }else{
      pPage->intKeyLeaf = 0;
      pPage->xCellSize = cellSizePtrNoPayload;
      pPage->xParseCell = btreeParseCellPtrNoPayload;
    }
    pPage->maxLocal = pBt->maxLeaf;
    pPage->minLocal = pBt->minLeaf;
  }else if( flagByte==PTF_ZERODATA ){
    /* EVIDENCE-OF: R-43316-37308 A value of 2 (0x02) means the page is an
    ** interior index b-tree page. */
    assert( (PTF_ZERODATA)==2 );
    /* EVIDENCE-OF: R-59615-42828 A value of 10 (0x0a) means the page is a
    ** leaf index b-tree page. */
    assert( (PTF_ZERODATA|PTF_LEAF)==10 );
    pPage->intKey = 0;
    pPage->intKeyLeaf = 0;
    pPage->xParseCell = btreeParseCellPtrIndex;
    pPage->maxLocal = pBt->maxLocal;
    pPage->minLocal = pBt->minLocal;
  }else{
................................................................................
static void fts5SourceIdFunc(
  sqlite3_context *pCtx,          /* Function call context */
  int nArg,                       /* Number of args */
  sqlite3_value **apUnused        /* Function arguments */
){
  assert( nArg==0 );
  UNUSED_PARAM2(nArg, apUnused);
  sqlite3_result_text(pCtx, "fts5: 2016-05-18 10:57:30 fc49f556e48970561d7ab6a2f24fdd7d9eb81ff2", -1, SQLITE_TRANSIENT);
}
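
The fts5SourceIdFunc() above implements the zero-argument fts5_source_id()
SQL function, which reports the FTS5 source-code version. A minimal sketch
of reading it from C (assuming an open sqlite3 *db built with FTS5; this
example is not part of the check-in):

  sqlite3_stmt *pStmt = 0;
  if( sqlite3_prepare_v2(db, "SELECT fts5_source_id();", -1, &pStmt, 0)
        ==SQLITE_OK
   && sqlite3_step(pStmt)==SQLITE_ROW ){
    /* Prints e.g. "fts5: 2016-05-18 10:57:30 fc49f556..." */
    printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
  }
  sqlite3_finalize(pStmt);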

static int fts5Init(sqlite3 *db){
  static const sqlite3_module fts5Mod = {
    /* iVersion      */ 2,
    /* xCreate       */ fts5CreateMethod,
    /* xConnect      */ fts5ConnectMethod,

Changes to src/sqlite3.h.

**
** See also: [sqlite3_libversion()],
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
#define SQLITE_VERSION        "3.13.0"
#define SQLITE_VERSION_NUMBER 3013000
#define SQLITE_SOURCE_ID      "2016-05-09 19:03:42 14e53d0e2f62d82ae1d64a72fd9711548e3bf5ea"

/*
** CAPI3REF: Run-Time Library Version Numbers
** KEYWORDS: sqlite3_version, sqlite3_sourceid
**
** These interfaces provide the same information as the [SQLITE_VERSION],
** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros

**
** See also: [sqlite3_libversion()],
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
#define SQLITE_VERSION        "3.13.0"
#define SQLITE_VERSION_NUMBER 3013000
#define SQLITE_SOURCE_ID      "2016-05-18 10:57:30 fc49f556e48970561d7ab6a2f24fdd7d9eb81ff2"

/*
** CAPI3REF: Run-Time Library Version Numbers
** KEYWORDS: sqlite3_version, sqlite3_sourceid
**
** These interfaces provide the same information as the [SQLITE_VERSION],
** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros
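
The hunk above bumps only SQLITE_SOURCE_ID to the 2016-05-18 snapshot;
SQLITE_VERSION and SQLITE_VERSION_NUMBER stay at 3.13.0. A quick way to
confirm that the header and the linked library agree, using the run-time
interfaces described above (a hedged standalone sketch, not part of the
check-in):

  #include <assert.h>
  #include <stdio.h>
  #include "sqlite3.h"

  int main(void){
    /* The compile-time macros should match the run-time library */
    assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
    printf("header %s, library %s, source id %s\n",
           SQLITE_VERSION, sqlite3_libversion(), sqlite3_sourceid());
    return 0;
  }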

Changes to src/wiki.c.

** Return true if the given pagename is the name of the sandbox
*/
static int is_sandbox(const char *zPagename){
  return fossil_stricmp(zPagename,"sandbox")==0 ||
         fossil_stricmp(zPagename,"sand box")==0;
}

/*
** Only allow certain mimetypes through.
** All others become "text/x-fossil-wiki"
*/
const char *wiki_filter_mimetypes(const char *zMimetype){
  if( zMimetype!=0 &&
      ( fossil_strcmp(zMimetype, "text/x-markdown")==0
        || fossil_strcmp(zMimetype, "text/plain")==0 )
  ){
    return zMimetype;
  }
  return "text/x-fossil-wiki";
}

/*
** Render wiki text according to its mimetype.
**
................................................................................
    db_multi_exec("INSERT INTO modreq(objid) VALUES(%d)", nrid);
  }
  db_multi_exec("INSERT OR IGNORE INTO unsent VALUES(%d)", nrid);
  db_multi_exec("INSERT OR IGNORE INTO unclustered VALUES(%d);", nrid);
  manifest_crosslink(nrid, pWiki, MC_NONE);
}

/*
** Formal names and common names for the various wiki styles.
*/
static const char *const azStyles[] = {
  "text/x-fossil-wiki", "Fossil Wiki",
  "text/x-markdown",    "Markdown",
  "text/plain",         "Plain Text"
};

/*
** Output a selection box from which the user can select the
** wiki mimetype.
*/
void mimetype_option_menu(const char *zMimetype){
  unsigned i;
  @ <select name="mimetype" size="1">
  for(i=0; i<sizeof(azStyles)/sizeof(azStyles[0]); i+=2){
    if( fossil_strcmp(zMimetype,azStyles[i])==0 ){
      @ <option value="%s(azStyles[i])" selected>%s(azStyles[i+1])</option>
    }else{
      @ <option value="%s(azStyles[i])">%s(azStyles[i+1])</option>
    }
  }
  @ </select>
................................................................................
  @ through the matching &lt;/verbatim&gt;.</p></li>
  @ </ol>
  style_footer();
}

/*
** Add a new wiki page to the repository.  The page name is
** given by the zPageName parameter.  isNew must be true to create
** a new page.  If no previous page with the name zPageName exists
** and isNew is false, then this routine throws an error.
**
** The content of the new page is given by the blob pContent.
**
** zMimeType specifies the N-card for the wiki page. If it is 0,
** empty, or "text/x-fossil-wiki" (the default format) then it is
** ignored.
*/
int wiki_cmd_commit(const char *zPageName, int isNew, Blob *pContent,
                    const char *zMimeType, int localUser){
  Blob wiki;              /* Wiki page content */
  Blob cksum;             /* wiki checksum */
  int rid;                /* artifact ID of parent page */
  char *zDate;            /* timestamp */
  char *zUuid;            /* uuid for rid */

  rid = db_int(0,
     "SELECT x.rid FROM tag t, tagxref x"
     " WHERE x.tagid=t.tagid AND t.tagname='wiki-%q'"
     " ORDER BY x.mtime DESC LIMIT 1",
     zPageName
  );
  if( rid==0 && !isNew ){
#ifdef FOSSIL_ENABLE_JSON
    g.json.resultCode = FSL_JSON_E_RESOURCE_NOT_FOUND;
#endif
    fossil_fatal("no such wiki page: %s", zPageName);
  }
  if( rid!=0 && isNew ){
#ifdef FOSSIL_ENABLE_JSON
    g.json.resultCode = FSL_JSON_E_RESOURCE_ALREADY_EXISTS;
#endif
    fossil_fatal("wiki page %s already exists", zPageName);
  }

  blob_zero(&wiki);
  zDate = date_in_standard_format("now");
  blob_appendf(&wiki, "D %s\n", zDate);
  free(zDate);
  blob_appendf(&wiki, "L %F\n", zPageName );
  if( zMimeType && *zMimeType
      && 0!=fossil_strcmp(zMimeType,"text/x-fossil-wiki") ){
................................................................................
  blob_appendf(&wiki, "Z %b\n", &cksum);
  blob_reset(&cksum);
  db_begin_transaction();
  wiki_put(&wiki, 0, wiki_need_moderation(localUser));
  db_end_transaction(0);
  return 1;
}

/*
** COMMAND: wiki*
**
** Usage: %fossil wiki (export|create|commit|list) WikiName
**
** Run various subcommands to work with wiki entries or tech notes.
**
**    %fossil wiki export ?PAGENAME? ?FILE? [-t|--technote DATETIME ]
**
**       Sends the latest version of either the PAGENAME wiki entry
**       or the DATETIME tech note to the given file or standard
**       output. One of PAGENAME or DATETIME must be specified.
**
**    %fossil wiki (create|commit) PAGENAME ?FILE? ?OPTIONS?
**
**       Create a new or commit changes to an existing wiki page or
**       technote from FILE or from standard input.
**
**       Options:
**         -M|--mimetype TEXT-FORMAT   The mimetype of the update, defaulting
**                                     to the type used by the previous version
**                                     of the page or text/x-fossil-wiki.
**         -t|--technote DATETIME      Specifies the timestamp of the technote
**                                     to be created or updated.
**         --technote-tags TAGS        The set of tags for a technote.
**         --technote-bgcolor COLOR    The color used for the technote on the
**                                     timeline.
**
**    %fossil wiki list ?--technote?
**    %fossil wiki ls ?--technote?
**
**       Lists all wiki entries, one per line, ordered
**       case-insensitively by name. The --technote flag
**       specifies that technotes will be listed instead of
**       the wiki entries, in order of timestamp.
**
*/
void wiki_cmd(void){
  int n;
  db_find_and_open_repository(0, 0);
  if( g.argc<3 ){
    goto wiki_cmd_usage;
................................................................................
      }
      if( zBody==0 ){
        fossil_fatal("wiki page [%s] not found",zPageName);
      }
      zFile = (g.argc==4) ? "-" : g.argv[4];
    }else{
      if( (g.argc!=3) && (g.argc!=4) ){
        usage("export ?FILE? --technote DATETIME");
      }
      rid = db_int(0, "SELECT objid FROM event"
        " WHERE datetime(mtime)=datetime('%q') AND type='e'"
        " ORDER BY mtime DESC LIMIT 1",
        zETime
      );

      if( (pWiki = manifest_get(rid, CFTYPE_EVENT, 0))!=0 ){
        zBody = pWiki->zWiki;
      }
      if( zBody==0 ){
        fossil_fatal("technote not found");
      }
      zFile = (g.argc==3) ? "-" : g.argv[3];
    }
    for(i=strlen(zBody); i>0 && fossil_isspace(zBody[i-1]); i--){}
    zBody[i] = 0;
    blob_init(&body, zBody, -1);
    blob_append(&body, "\n", 1);
................................................................................
                     zPageName
                     );
        if(rid>0 && (pWiki = manifest_get(rid, CFTYPE_WIKI, 0))!=0
           && (pWiki->zMimetype && *pWiki->zMimetype)){
          zMimeType = pWiki->zMimetype;
        }
      }else{
        rid = db_int(0, "SELECT objid FROM event"
                     " WHERE datetime(mtime)=datetime('%q') AND type='e'"
                     " ORDER BY mtime DESC LIMIT 1",
                     zPageName
                     );
        if(rid>0 && (pWiki = manifest_get(rid, CFTYPE_EVENT, 0))!=0
           && (pWiki->zMimetype && *pWiki->zMimetype)){
          zMimeType = pWiki->zMimetype;
        }
      }
    }

    if( !zETime ){
      if( g.argv[2][1]=='r' ){
        wiki_cmd_commit(zPageName, 1, &content, zMimeType, 1);
        fossil_print("Created new wiki page %s.\n", zPageName);
      }else{
        wiki_cmd_commit(zPageName, 0, &content, zMimeType, 1);
        fossil_print("Updated wiki page %s.\n", zPageName);
      }
    }else{
      char *zMETime;          /* Normalized, mutable version of zETime */
      zMETime = db_text(0, "SELECT coalesce(datetime(%Q),datetime('now'))",
                        zETime);
      if( g.argv[2][1]=='r' ){
        event_cmd_commit(zMETime, 1, &content, zMimeType, zPageName,
                         zTags, zClr);
        fossil_print("Created new tech note %s.\n", zMETime);
      }else{
        event_cmd_commit(zMETime, 0, &content, zMimeType, zPageName,
                         zTags, zClr);
        fossil_print("Updated tech note %s.\n", zMETime);
      }
      free(zMETime);
    }
    manifest_destroy(pWiki);
    blob_reset(&content);
  }else if( strncmp(g.argv[2],"delete",n)==0 ){
    if( g.argc!=5 ){
      usage("delete PAGENAME");
    }
    fossil_fatal("delete not yet implemented.");
  }else if(( strncmp(g.argv[2],"list",n)==0 )
          || ( strncmp(g.argv[2],"ls",n)==0 )){
    Stmt q;

    if ( !find_option("technote","t",0) ){
      db_prepare(&q,
        "SELECT substr(tagname, 6) FROM tag WHERE tagname GLOB 'wiki-*'"
        " ORDER BY lower(tagname) /*sort*/"
      );
    }else{
      db_prepare(&q,
        "SELECT datetime(mtime) FROM event WHERE type='e'"
        " ORDER BY mtime /*sort*/"
      );
    }

    while( db_step(&q)==SQLITE_ROW ){
      const char *zName = db_column_text(&q, 0);
      fossil_print( "%s\n",zName);
    }
    db_finalize(&q);
  }else{
    goto wiki_cmd_usage;
  }
  return;

wiki_cmd_usage:
  usage("export|create|commit|list ...");
}

** Return true if the given pagename is the name of the sandbox
*/
static int is_sandbox(const char *zPagename){
  return fossil_stricmp(zPagename,"sandbox")==0 ||
         fossil_stricmp(zPagename,"sand box")==0;
}
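
Both spellings are matched case-insensitively via fossil_stricmp(), so
mixed-case names such as "SandBox" also select the sandbox. Hypothetical
checks (not part of the check-in):

  assert( is_sandbox("SandBox") );    /* case-insensitive match */
  assert( is_sandbox("SAND BOX") );
  assert( !is_sandbox("sandbox2") );  /* any other name */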

/*
** Formal, common and short names for the various wiki styles.
*/
static const char *const azStyles[] = {
  "text/x-fossil-wiki", "Fossil Wiki", "wiki",
  "text/x-markdown",    "Markdown",    "markdown",
  "text/plain",         "Plain Text",  "plain"
};

/*
** Only allow certain mimetypes through.
** All others become "text/x-fossil-wiki"
*/
const char *wiki_filter_mimetypes(const char *zMimetype){
  if( zMimetype!=0 ){
    int i;    
    for(i=0; i<sizeof(azStyles)/sizeof(azStyles[0]); i+=3){
      if( fossil_strcmp(zMimetype,azStyles[i+2])==0 ){
        return azStyles[i];
      }
    }
    if(  fossil_strcmp(zMimetype, "text/x-markdown")==0
        || fossil_strcmp(zMimetype, "text/plain")==0 ){
      return zMimetype;
    }
  }
  return "text/x-fossil-wiki";
}
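
With the short-name synonyms from azStyles, the filter normalizes whatever
the user typed before it is stored on the N-card. Hypothetical sanity
checks (not part of the check-in), using the names listed in azStyles:

  assert( fossil_strcmp(wiki_filter_mimetypes("markdown"),
                        "text/x-markdown")==0 );    /* short name -> formal */
  assert( fossil_strcmp(wiki_filter_mimetypes("text/plain"),
                        "text/plain")==0 );         /* formal name passes   */
  assert( fossil_strcmp(wiki_filter_mimetypes("text/html"),
                        "text/x-fossil-wiki")==0 ); /* fallback default     */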

/*
** Render wiki text according to its mimetype.
**
................................................................................
    db_multi_exec("INSERT INTO modreq(objid) VALUES(%d)", nrid);
  }
  db_multi_exec("INSERT OR IGNORE INTO unsent VALUES(%d)", nrid);
  db_multi_exec("INSERT OR IGNORE INTO unclustered VALUES(%d);", nrid);
  manifest_crosslink(nrid, pWiki, MC_NONE);
}

/*
** Output a selection box from which the user can select the
** wiki mimetype.
*/
void mimetype_option_menu(const char *zMimetype){
  unsigned i;
  @ <select name="mimetype" size="1">
  for(i=0; i<sizeof(azStyles)/sizeof(azStyles[0]); i+=3){
    if( fossil_strcmp(zMimetype,azStyles[i])==0 ){
      @ <option value="%s(azStyles[i])" selected>%s(azStyles[i+1])</option>
    }else{
      @ <option value="%s(azStyles[i])">%s(azStyles[i+1])</option>
    }
  }
  @ </select>
................................................................................
  @ through the matching &lt;/verbatim&gt;.</p></li>
  @ </ol>
  style_footer();
}

/*
** Add a new wiki page to the repository.  The page name is
** given by the zPageName parameter.  rid must be zero to create
** a new page; otherwise the page identified by rid is updated.
**
** The content of the new page is given by the blob pContent.
**
** zMimeType specifies the N-card for the wiki page. If it is 0,
** empty, or "text/x-fossil-wiki" (the default format) then it is
** ignored.
*/
int wiki_cmd_commit(const char *zPageName, int rid, Blob *pContent,
                    const char *zMimeType, int localUser){
  Blob wiki;              /* Wiki page content */
  Blob cksum;             /* wiki checksum */
  char *zDate;            /* timestamp */
  char *zUuid;            /* uuid for rid */

  blob_zero(&wiki);
  zDate = date_in_standard_format("now");
  blob_appendf(&wiki, "D %s\n", zDate);
  free(zDate);
  blob_appendf(&wiki, "L %F\n", zPageName );
  if( zMimeType && *zMimeType
      && 0!=fossil_strcmp(zMimeType,"text/x-fossil-wiki") ){
................................................................................
  blob_appendf(&wiki, "Z %b\n", &cksum);
  blob_reset(&cksum);
  db_begin_transaction();
  wiki_put(&wiki, 0, wiki_need_moderation(localUser));
  db_end_transaction(0);
  return 1;
}

/*
** Determine the rid for a tech note given either its id or its
** timestamp. Returns 0 if there is no such item and -1 if the details
** are ambiguous and could refer to multiple items.
*/
int wiki_technote_to_rid(const char *zETime) {
  int rid=0;                    /* Artifact ID of the tech note */
  int nETime = strlen(zETime);
  Stmt q;
  if( nETime>=4 && nETime<=UUID_SIZE && validate16(zETime, nETime) ){
    char zUuid[UUID_SIZE+1];
    memcpy(zUuid, zETime, nETime+1);
    canonical16(zUuid, nETime);
    db_prepare(&q,
      "SELECT e.objid"
      "  FROM event e, tag t"
      " WHERE e.type='e' AND e.tagid IS NOT NULL AND t.tagid=e.tagid"
      "   AND t.tagname GLOB 'event-%q*'",
      zUuid
    );
    if( db_step(&q)==SQLITE_ROW ){
      rid = db_column_int(&q, 0);
      if( db_step(&q)==SQLITE_ROW ) rid = -1;
    }
    db_finalize(&q);
  }
  if (!rid) {
    if (strlen(zETime)>4) {
      rid = db_int(0, "SELECT objid"
                      "  FROM event"
                      " WHERE datetime(mtime)=datetime('%q')"
                      "   AND type='e'"
                      "   AND tagid IS NOT NULL"
                      " ORDER BY objid DESC LIMIT 1",
                   zETime);
    }
  }
  return rid;
}
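
Because a short hexadecimal prefix can match more than one tech note, a
caller has to treat the -1 return as an error distinct from 0. A minimal
sketch of the expected call pattern (hypothetical; the export handler
further down performs exactly these checks):

  int rid = wiki_technote_to_rid(zETime); /* zETime: id prefix or DATETIME */
  if( rid==-1 ) fossil_fatal("ambiguous tech note id: %s", zETime);
  if( rid==0 )  fossil_fatal("no such tech note: %s", zETime);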

/*
** COMMAND: wiki*
**
** Usage: %fossil wiki (export|create|commit|list) WikiName
**
** Run various subcommands to work with wiki entries or tech notes.
**
**    %fossil wiki export PAGENAME ?FILE?
**    %fossil wiki export ?FILE? -t|--technote DATETIME|TECHNOTE-ID 
**
**       Sends the latest version of either a wiki page or a tech note
**       to the given file or standard output.
**       If PAGENAME is provided, the wiki page will be output. For
**       a tech note either DATETIME or TECHNOTE-ID must be specified. If 
**       DATETIME is used, the most recently modified tech note with that
**       DATETIME will be sent.
**
**    %fossil wiki (create|commit) PAGENAME ?FILE? ?OPTIONS?
**              
**       Create a new or commit changes to an existing wiki page or 
**       technote from FILE or from standard input. PAGENAME is the
**       name of the wiki entry or the timeline comment of the
**       technote.
**
**       Options:
**         -M|--mimetype TEXT-FORMAT   The mime type of the update.
**                                     Defaults to the type used by
**                                     the previous version of the
**                                     page, or text/x-fossil-wiki.
**                                     Valid values are: text/x-fossil-wiki,
**                                     text/x-markdown and text/plain. fossil,
**                                     markdown or plain can be specified as
**                                     synonyms of these values.
**         -t|--technote DATETIME      Specifies the timestamp of
**                                     the technote to be created or
**                                     updated. When updating a tech note
**                                     the most recently modified tech note
**                                     with the specified timestamp will be
**                                     updated.
**         -t|--technote TECHNOTE-ID   Specifies the technote to be
**                                     updated by its technote id.
**         --technote-tags TAGS        The set of tags for a technote.
**         --technote-bgcolor COLOR    The color used for the technote
**                                     on the timeline.
**
**    %fossil wiki list ?OPTIONS?
**    %fossil wiki ls ?OPTIONS?
**
**       Lists all wiki entries, one per line, ordered
**       case-insensitively by name. 
**
**       Options:
**         -t|--technote               Technotes will be listed instead of
**                                     pages. The technotes will be in order
**                                     of timestamp with the most recent
**                                     first.
**         -s|--show-technote-ids      The id of the tech note will be listed
**                                     alongside the timestamp. The tech note
**                                     id will be the first word on each line.
**                                     This option only applies if the
**                                     --technote option is also specified.
**
*/
void wiki_cmd(void){
  int n;
  db_find_and_open_repository(0, 0);
  if( g.argc<3 ){
    goto wiki_cmd_usage;
................................................................................
      }
      if( zBody==0 ){
        fossil_fatal("wiki page [%s] not found",zPageName);
      }
      zFile = (g.argc==4) ? "-" : g.argv[4];
    }else{
      if( (g.argc!=3) && (g.argc!=4) ){
        usage("export ?FILE? --technote DATETIME|TECHNOTE-ID");
      }
      rid = wiki_technote_to_rid(zETime);
      if (rid == -1) {
        fossil_fatal("ambiguous tech note id: %s", zETime);
      }
      if( (pWiki = manifest_get(rid, CFTYPE_EVENT, 0))!=0 ){
        zBody = pWiki->zWiki;
      }
      if( zBody==0 ){
        fossil_fatal("technote [%s] not found",zETime);
      }
      zFile = (g.argc==3) ? "-" : g.argv[3];
    }
    for(i=strlen(zBody); i>0 && fossil_isspace(zBody[i-1]); i--){}
    zBody[i] = 0;
    blob_init(&body, zBody, -1);
    blob_append(&body, "\n", 1);
................................................................................
                     zPageName
                     );
        if(rid>0 && (pWiki = manifest_get(rid, CFTYPE_WIKI, 0))!=0
           && (pWiki->zMimetype && *pWiki->zMimetype)){
          zMimeType = pWiki->zMimetype;
        }
      }else{
        rid = wiki_technote_to_rid(zETime);
        if(rid>0 && (pWiki = manifest_get(rid, CFTYPE_EVENT, 0))!=0
           && (pWiki->zMimetype && *pWiki->zMimetype)){
          zMimeType = pWiki->zMimetype;
        }
      }
    }else{
      zMimeType = wiki_filter_mimetypes(zMimeType);
    }
    if( g.argv[2][1]=='r' && rid>0 ){
      if ( !zETime ){
        fossil_fatal("wiki page %s already exists", zPageName);
      }else{
        /* Creating a tech note with same timestamp is permitted
           and should create a new tech note */
        rid = 0; 
      }
    }else if( g.argv[2][1]=='o' && rid == 0 ){
      if ( !zETime ){
        fossil_fatal("no such wiki page: %s", zPageName);
      }else{
        fossil_fatal("no such tech note: %s", zETime);
      }
    }

    if( !zETime ){
      wiki_cmd_commit(zPageName, rid, &content, zMimeType, 1);
      if( g.argv[2][1]=='r' ){
        fossil_print("Created new wiki page %s.\n", zPageName);
      }else{
        fossil_print("Updated wiki page %s.\n", zPageName);
      }
    }else{
      if( rid != -1 ){
        char *zMETime;          /* Normalized, mutable version of zETime */
        zMETime = db_text(0, "SELECT coalesce(datetime(%Q),datetime('now'))",
                          zETime);
        event_cmd_commit(zMETime, rid, &content, zMimeType, zPageName,
                         zTags, zClr);
        if( g.argv[2][1]=='r' ){
          fossil_print("Created new tech note %s.\n", zMETime);
        }else{
          fossil_print("Updated tech note %s.\n", zMETime);
        }
        free(zMETime);
      }else{
        fossil_fatal("ambiguous tech note id: %s", zETime);
      }
    }
    manifest_destroy(pWiki);
    blob_reset(&content);
  }else if( strncmp(g.argv[2],"delete",n)==0 ){
    if( g.argc!=5 ){
      usage("delete PAGENAME");
    }
    fossil_fatal("delete not yet implemented.");
  }else if(( strncmp(g.argv[2],"list",n)==0 )
          || ( strncmp(g.argv[2],"ls",n)==0 )){
    Stmt q;
    int showIds = 0;

    if ( !find_option("technote","t",0) ){
      db_prepare(&q,
        "SELECT substr(tagname, 6) FROM tag WHERE tagname GLOB 'wiki-*'"
        " ORDER BY lower(tagname) /*sort*/"
      );
    }else{
      showIds = find_option("show-technote-ids","s",0)!=0;
      db_prepare(&q,
        "SELECT datetime(e.mtime), substr(t.tagname,7)"
         " FROM event e, tag t"
        " WHERE e.type='e'"
          " AND e.tagid IS NOT NULL"
          " AND t.tagid=e.tagid"
        " ORDER BY e.mtime DESC /*sort*/"
      );
    }
  
    while( db_step(&q)==SQLITE_ROW ){
      const char *zName = db_column_text(&q, 0);
      if (showIds) {
        const char *zUuid = db_column_text(&q, 1);
        fossil_print("%s ",zUuid);
      }
      fossil_print( "%s\n",zName);
    }
    db_finalize(&q);
  }else{
    goto wiki_cmd_usage;
  }
  return;

wiki_cmd_usage:
  usage("export|create|commit|list ...");
}

Changes to test/merge6.test.

fossil merge branch_for_f3_f4
fossil commit -m "new trunk files f2, f3, and f4 via merge"
fossil ls

test merge_multi-4 {[normalize_result] eq {f1
f2
f3
f4}} knownBug

###############################################################################

test_cleanup

fossil merge branch_for_f3_f4
fossil commit -m "new trunk files f2, f3, and f4 via merge"
fossil ls

test merge_multi-4 {[normalize_result] eq {f1
f2
f3
f4}}

###############################################################################

test_cleanup

Added test/merge_exe.test.

#
# Copyright (c) 2016 D. Richard Hipp
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Simplified BSD License (also
# known as the "2-Clause License" or "FreeBSD License".)
#
# This program is distributed in the hope that it will be useful,
# but without any warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose.
#
# Author contact information:
#   drh@hwaci.com
#   http://www.hwaci.com/drh/
#
############################################################################
#
# Testing changes to a file's execute bit caused by a merge
#

if {$tcl_platform(platform) eq "unix"} {
  proc setx {fn isexe} {
    file attributes $fn -permissions [expr {$isexe ? "+" : "-"}]x
  }

  proc test_exe {fn expected} {
    test merge_exe-$fn {[file executable $fn]==$expected}
  }
} else {
  # WARNING: This is a hack for setting and testing a file's execute bit
  # on Windows. Never operate directly on Fossil database files like this
  # unless you really need to and really know what you're doing.

  proc query {sql} {
    return [exec $::fossilexe sqlite3 --no-repository _FOSSIL_ $sql]
  }

  proc setx {fn isexe} {
    set isexe [expr {bool($isexe)}]
    query "UPDATE vfile SET isexe=$isexe WHERE pathname='$fn'"
  }

  proc test_exe {fn expected} {
    set result [query "SELECT isexe FROM vfile WHERE pathname='$fn'"]
    test merge_exe-$fn {$result==$expected}
  }
}

test_setup

write_file f1 "line"
write_file f2 "line"
write_file f3 "line"
write_file f4 "line"
fossil addremove
setx f3 1
setx f4 1
fossil commit -m "add files"

write_file f0 "f0"
fossil add f0
setx f0 1
fossil mv --hard f1 f1n
setx f1n 1
write_file f2 "line\nline2"
setx f2 1
write_file f3 "line\nline2"
setx f3 0
setx f4 0
fossil commit -b b -m "changes"

fossil update trunk
write_file f3 "line3\nline"
fossil commit -m "edit f3"

fossil merge b
test_status_list merge_exe-mrg $RESULT {
  EXECUTABLE f1
  EXECUTABLE f2
  UNEXEC f3
  UNEXEC f4
  UPDATE f2
  MERGE f3
  RENAME f1 -> f1n
  ADDED f0
}
foreach {fn isexe} {f0 1 f1n 1 f2 1 f3 0 f4 0} {
  test_exe $fn $isexe
}

###############################################################################

test_cleanup

Changes to test/merge_renames.test.

#
# Tests for merging with renames
#
#

require_no_open_checkout

######################################
#  Test 1                            #
#  Reported: Ticket [554f44ee74e3d]  #
######################################
................................................................................
write_file f1 "line5"
fossil commit -m "c4"

write_file f1 "line6"
fossil commit -m "c4"

fossil update pivot
fossil mv f1 f2
file rename -force f1 f2
fossil commit -b rename -m "c5"

fossil merge trunk
fossil commit -m "trunk merged"

fossil update pivot
write_file f3 "someline"
fossil add f3
fossil commit -b branch2 -m "newbranch"

fossil merge trunk
puts $RESULT

set deletes 0
foreach {status filename} $RESULT {
    if {$status=="DELETE"} {
        set deletes [expr $deletes + 1]
    }
}

if {$deletes!=0} {
    # failed
    protOut "Error, the merge should not delete any file"
    test merge_renames-1 0
} else {
    test merge_renames-1 1
}

######################################
#  Test 2                            #
#  Reported: Ticket [74413366fe5067] #
######################################

test_setup
................................................................................
fossil commit -m "base file"
fossil tag add pivot current

write_file f2 "line2"
fossil add f2
fossil commit -m "newfile"

fossil mv f2 f2new
file rename -force f2 f2new
fossil commit -m "rename"

fossil update pivot
write_file f1 "line3"
fossil commit -b branch -m "change"

fossil merge trunk
fossil commit -m "trunk merged"

fossil update trunk

fossil merge branch
puts $RESULT

# Not a nice way to check, but I don't know a better way in Tcl yet
set deletes 0
foreach {status filename} $RESULT {
    if {$status=="DELETE"} {
        set deletes [expr $deletes + 1]
    }
}

if {$deletes!=0} {
    # failed
    protOut "Error, the merge should not delete any file"
    test merge_renames-2 0
} else {
    test merge_renames-2 1
}

######################################
#  Test 3                            #
#  Reported: Ticket [30b28cf351]     #
######################################

test_setup
................................................................................
fossil commit -m "base file"
fossil tag add pivot current

write_file f2 "line2"
fossil add f2
fossil commit -m "newfile"

fossil mv f2 f2new
file rename -force f2 f2new
fossil commit -m "rename"

fossil update pivot
write_file f1 "line3"
fossil commit -b branch -m "change"

fossil merge trunk
fossil commit -m "trunk merged"

fossil update trunk

fossil merge branch
puts $RESULT

# Not a nice way to check, but I don't know a better way in Tcl yet
set deletes 0
foreach {status filename} $RESULT {
    if {$status=="DELETE"} {
        set deletes [expr $deletes + 1]
    }
}

if {$deletes!=0} {
    # failed
    protOut "Error, the merge should not delete any file"
    test merge_renames-3 0
} else {
    test merge_renames-3 1
}

######################################
#  Test 4                            #
#  Reported: Ticket [67176c3aa4]     #
######################################

# TO BE WRITTEN.

######################################
#  Test 5                            #
#  Handle Rename/Add via Merge       #
######################################

test_setup
................................................................................
fossil commit -m "base file"

write_file f3 "f3 line"
fossil add f3
fossil commit -m "branch file" -b branch_for_f3

fossil update trunk
fossil mv f1 f2
file rename -force f1 f2
write_file f1 "new f1 line"
fossil add f1
fossil commit -m "rename and add file with old name"

fossil update branch_for_f3
fossil merge trunk

fossil commit -m "trunk merged, should have 3 files"

fossil ls

test merge_renames-5 {[normalize_result] eq {f1
f2
f3}} knownBug

######################################
#
# Tests for troubles not specifically linked with renames but that I'd like to
# write:
#  [c26c63eb1b] - 'merge --backout' does not handle conflicts properly
#  [953031915f] - Lack of warning when overwriting extra files
#  [4df5f38f1e] - Troubles merging a file delete with a file change

###############################################################################

test_cleanup

#
# Tests for merging with renames
#
#

proc commit_id {version} {
  regexp -line {^artifact:\s+(\S+)} [fossil whatis $version] - id
  return $id
}

require_no_open_checkout

######################################
#  Test 1                            #
#  Reported: Ticket [554f44ee74e3d]  #
######################################
................................................................................
write_file f1 "line5"
fossil commit -m "c4"

write_file f1 "line6"
fossil commit -m "c4"

fossil update pivot
fossil mv --hard f1 f2

fossil commit -b rename -m "c5"

fossil merge trunk
fossil commit -m "trunk merged"

fossil update pivot
write_file f3 "someline"
fossil add f3
fossil commit -b branch2 -m "newbranch"

fossil merge trunk
test_status_list merge_renames-1 $RESULT {UPDATE f1}

######################################
#  Test 2                            #
#  Reported: Ticket [74413366fe5067] #
######################################

test_setup
................................................................................
fossil commit -m "base file"
fossil tag add pivot current

write_file f2 "line2"
fossil add f2
fossil commit -m "newfile"

fossil mv --hard f2 f2new
fossil commit -m "rename"

fossil update pivot
write_file f1 "line3"
fossil commit -b branch -m "change"

fossil merge trunk
fossil commit -m "trunk merged"

fossil update trunk

fossil merge branch
test_status_list merge_renames-2 $RESULT {UPDATE f1}

######################################
#  Test 3                            #
#  Reported: Ticket [30b28cf351]     #
######################################

test_setup
................................................................................
fossil commit -m "base file"
fossil tag add pivot current

write_file f2 "line2"
fossil add f2
fossil commit -m "newfile"

fossil mv --hard f2 f2new
fossil commit -m "rename"

fossil update pivot
write_file f1 "line3"
fossil commit -b branch -m "change"

fossil merge trunk
fossil commit -m "trunk merged"

fossil update trunk

fossil merge branch
test_status_list merge_renames-3 $RESULT {UPDATE f1}

######################################
#  Test 4                            #
#  Reported: Ticket [67176c3aa4]     #
######################################

test_setup

write_file f1 "f1"
fossil add f1
fossil commit -m "add f1"

write_file f1 "f1.1"
fossil commit --branch b -m "change f1"

fossil update trunk
fossil mv --hard f1 f2
fossil commit -m "f1 -> f2"

fossil merge b
test_status_list merge_renames-4-1 $RESULT {UPDATE f2}
test_file_contents merge_renames-4-2 f2 "f1.1"

######################################
#  Test 5                            #
#  Handle Rename/Add via Merge       #
######################################

test_setup
................................................................................
fossil commit -m "base file"

write_file f3 "f3 line"
fossil add f3
fossil commit -m "branch file" -b branch_for_f3

fossil update trunk
fossil mv --hard f1 f2
write_file f1 "new f1 line"
fossil add f1
fossil commit -m "rename and add file with old name"

fossil update branch_for_f3
fossil merge trunk
test_status_list merge_renames-5-1 $RESULT {
  RENAME f1 -> f2
  ADDED f1
}

fossil commit -m "trunk merged, should have 3 files"

fossil ls

test merge_renames-5-2 {[normalize_result] eq {f1
f2
f3}}

#####################################
#  Test 6                           #
#  Merging a branch multiple times  #
#####################################

test_setup

write_file f1 "f1"
fossil add f1
fossil commit -m "add f1"

fossil mv --hard f1 f2
fossil commit -b b -m "f1 -> f2"

fossil update trunk
write_file f3 "f3"
write_file f4 "f4"
fossil add f3 f4
fossil ci -m "add f3, f4"

fossil mv --hard f3 f3-old
fossil mv --hard f4 f3
fossil mv --hard f3-old f4
fossil ci -m "swap f3 and f4"

write_file f1 "f1.1"
fossil commit -m "edit f1"

fossil update b
fossil merge trunk
fossil commit -m "merge trunk"

fossil update trunk
write_file f1 "f1.2"
write_file f3 "f3.1"
write_file f4 "f4.1"
fossil commit -m "edit f1, f4"

fossil update b
fossil merge trunk
test_status_list merge_renames-6-1 $RESULT {
  UPDATE f2
  UPDATE f3
  UPDATE f4
}
test_file_contents merge_renames-6-2 f2 "f1.2"
test_file_contents merge_renames-6-3 f3 "f3.1"
test_file_contents merge_renames-6-4 f4 "f4.1"

########################################################################
#  Test 7                                                              #
#  Merging with an uncommitted rename of a file that has been renamed  #
#  in the merged branch and adding a new file with the original name   #
########################################################################

test_setup

write_file f1 "f1"
fossil add f1
fossil commit -m "add f1"

fossil mv --hard f1 f2
write_file f2 "f2"
fossil commit -b b -m "f1 -> f2, edit f2"

fossil update trunk
fossil mv --hard f1 f3
write_file f1 "f1.1"
fossil add f1
fossil merge b
test_status_list merge_renames-7-1 $RESULT {UPDATE f3}
test_file_contents merge_renames-7-2 f1 "f1.1"
test_file_contents merge_renames-7-3 f3 "f2"

######################################################
#  Test 8                                            #
#  Merging two branches that both add the same file  #
######################################################

test_setup

write_file f1 "f1.1"
fossil add f1
fossil commit -b b1 -m "add f1"

fossil update trunk
write_file f1 "f1.2"
fossil add f1
fossil commit -b b2 -m "add f1"

fossil update trunk
fossil merge b1
fossil merge b2
test_status_list merge_renames-8-1 $RESULT {
  WARNING: no common ancestor for f1
}

fossil revert
fossil merge --integrate b1
fossil merge b2
test_status_list merge_renames-8-2 $RESULT {
  WARNING: no common ancestor for f1
}

#############################################
#  Test 9                                   #
#  Merging a delete/rename/add combination  #
#############################################

test_setup

write_file f1 "f1"
write_file f2 "f2"
fossil add f1 f2
fossil commit -m "add files"

fossil rm --hard f2
fossil commit -b b -m "delete f2"

fossil mv --hard f1 f2
fossil commit -m "f1 -> f2"

write_file f1 "f1.1"
fossil add f1
fossil commit -m "add new f1"

fossil update trunk
fossil merge b
set expectedMerge {
  DELETE f2
  RENAME f1 -> f2
  ADDED f1
}
test_status_list merge_renames-9-1 $RESULT $expectedMerge
fossil changes
test_status_list merge_renames-9-2 $RESULT "
  MERGED_WITH [commit_id b]
  ADDED_BY_MERGE f1
  RENAMED f2
  DELETED f2 (overwritten by rename)
"
test_file_contents merge_renames-9-3 f1 "f1.1"
test_file_contents merge_renames-9-4 f2 "f1"

# Undo and ensure a dry run merge results in no changes
fossil undo
test_status_list merge_renames-9-5 $RESULT {
  UNDO f1
  UNDO f2
}
fossil merge -n b
test_status_list merge_renames-9-6 $RESULT "
  $expectedMerge
  REMINDER: this was a dry run - no files were actually changed.
"
test merge_renames-9-7 {[fossil changes] eq ""}

###################################################################
#  Test 10                                                        #
#  Merge swapped filenames, backout the swap, then merge changes  #
###################################################################

test_setup

write_file f1 "f1"
write_file f2 "f2"
fossil add f1 f2
fossil commit -m "add files" ;# N

fossil mv --hard f1 f1-tmp
fossil mv --hard f2 f1
fossil mv --hard f1-tmp f2
fossil commit -b b -m "swap f1, f2" ;# P

fossil update trunk
fossil merge b
test_status_list merge_renames-10-1 $RESULT {
  RENAME f1 -> f2
  RENAME f2 -> f1
}
test_file_contents merge_renames-10-2 f1 "f2"
test_file_contents merge_renames-10-3 f2 "f1"
fossil commit -m "merge b"

fossil update b
write_file f1 f1.1
write_file f2 f2.1
fossil commit -m "edit" ;# M

fossil update trunk
fossil merge --backout trunk
test_status_list merge_renames-10-4 $RESULT {
  RENAME f1 -> f2
  RENAME f2 -> f1
}
test_file_contents merge_renames-10-5 f1 "f1"
test_file_contents merge_renames-10-6 f2 "f2"
test_status_list merge_renames-10-7 [fossil changes] "
  RENAMED f1
  RENAMED f2
  BACKOUT [commit_id trunk]
"
fossil commit -m "swap back" ;# V

fossil merge b
test_status_list merge_renames-10-8 $RESULT {
  UPDATE f1
  UPDATE f2
}

test_file_contents merge_renames-10-9 f1 "f2.1"
test_file_contents merge_renames-10-10 f2 "f1.1"

############################################
#  Test 11                                 #
#  Specifying a baseline                   #
############################################

test_setup

write_file f1 "line"
fossil add f1
fossil commit -m "add f1"

write_file f1 "line\nline2"
fossil commit -b b -m "edit f2" --tag p1

fossil mv --hard f1 f2
fossil commit -m "f1 -> f2"

write_file f2 "line\nline2\nline3"
fossil commit -m "edit f2" --tag p2

write_file f2 "line\nline2\nline3\nline4"
fossil commit -m "edit f2"

fossil update trunk
fossil merge --baseline p1 b
test_status_list merge_renames-11-1 $RESULT {
  MERGE f1
  RENAME f1 -> f2
}
test_file_contents merge_renames-11-2 f2 "line\nline3\nline4"
fossil revert
fossil merge --baseline p2 b
test_status_list merge_renames-11-3 $RESULT {MERGE f1}
test_file_contents merge_renames-11-4 f1 "line\nline4"

#################################################################
#  Test 12                                                      #
#  Merge involving a pivot that isn't a first-parent ancestor   #
#  of either the checked-out commit or the commit being merged  #
#################################################################

test_setup

write_file f1 "f1\n"
fossil add f1
fossil commit -m "add f1" --tag n

fossil mv --hard f1 f1n
fossil commit -m "f1 -> f1n"

fossil mv --hard f1n f1v
write_file f1v "f1v\n"
fossil commit -b v -m "f1n -> f1v, edit f1v"

fossil update trunk
fossil mv --hard f1n f1m
fossil commit -b m -m "f1n -> f1m"

fossil update n
fossil mv --hard f1 f1p
write_file f1p "f1\np"
fossil commit -b p -m "f1 -> f1p, edit f1p"

fossil update m
fossil merge p
test_status_list merge_renames-12-1 $RESULT {UPDATE f1m}
test_file_contents merge_renames-12-2 f1m "f1\np"
fossil commit -m "merge p"

write_file f1m "f1\nm"
fossil commit -m "edit f1m"

fossil update v
fossil merge p
test_status_list merge_renames-12-3 $RESULT {MERGE f1v}
test_file_contents merge_renames-12-4 f1v "f1v\np"
fossil commit -m "merge p"

fossil merge m
test_status_list merge_renames-12-5 $RESULT {MERGE f1v}
test_file_contents merge_renames-12-6 f1v "f1v\nm"
fossil commit -m "merge m"

######################################
#
# Tests for troubles not specifically linked with renames but that I'd like to
# write:
#  [c26c63eb1b] - 'merge --backout' does not handle conflicts properly
#  [953031915f] - Lack of warning when overwriting extra files
#  [4df5f38f1e] - Troubles merging a file delete with a file change

###############################################################################

test_cleanup

Changes to test/tester.tcl.

    test $name 1 $constraints
  } else {
    protOut "  Expected:\n    [join $expected "\n    "]" 1
    protOut "  Got:\n    [join $result "\n    "]" 1
    test $name 0 $constraints
  }
}

# Append all arguments into a single value and return it.
#
proc appendArgs {args} {
  eval append result $args
}

    test $name 1 $constraints
  } else {
    protOut "  Expected:\n    [join $expected "\n    "]" 1
    protOut "  Got:\n    [join $result "\n    "]" 1
    test $name 0 $constraints
  }
}

# Perform a test on the contents of a file
#
proc test_file_contents {name path expected {constraints ""}} {
  if {[file exists $path]} {
    set result [read_file $path]
    set passed [expr {$result eq $expected}]
    if {!$passed} {
      set expectedLines [split $expected "\n"]
      set resultLines [split $result "\n"]
      protOut "  Expected:\n    [join $expectedLines "\n    "]" 1
      protOut "  Got:\n    [join $resultLines "\n    "]" 1
    }
  } else {
    set passed 0
    protOut "  File does not exist: $path" 1
  }
  test $name $passed $constraints
}

# Append all arguments into a single value and return it.
#
proc appendArgs {args} {
  eval append result $args
}

Added test/wiki.test.

#
# Copyright (c) 2016 D. Richard Hipp
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Simplified BSD License (also
# known as the "2-Clause License" or "FreeBSD License".)
#
# This program is distributed in the hope that it will be useful,
# but without any warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose.
#
# Author contact information:
#   drh@hwaci.com
#   http://www.hwaci.com/drh/
#
############################################################################
#
# Test wiki and attachment command support
#

test_setup

# Return true if two files are similar, i.e. equal after trailing spaces
# are stripped from each line and any final LF is removed from each file.
proc similar_file {a b} {
  set x [read_file $a]
  regsub -all { +\n} $x \n x
  regsub -all {\n$} $x {} x
  set y [read_file $b]
  regsub -all { +\n} $y \n y
  regsub -all {\n$} $y {} y
  return [expr {$x eq $y}]
}
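
# For example, "a \nb\n" and "a\nb" count as similar under this definition.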

# Return the mime type recorded in the manifest for a given wiki page.
# Returns "error: <details>" if the manifest can't be located, and
# "text/x-fossil-wiki" (the default mimetype for rendering) if the N card
# is omitted from the manifest.
# Note: makes fossil calls, so $CODE and $RESULT will be overwritten.
proc get_mime_type {name} {
  global CODE RESULT
  fossil http << "GET /wiki?name=$name"
  if {$CODE != 0} {
    return "error: /wiki?name=$name $CODE $RESULT"
  }
  set CODE [regexp {href="/info/([0-9a-f]+)"} $RESULT match info]
  if {$CODE == 0} {
    return "error: No info link found for wiki page $name"
  }
  fossil http << "GET /artifact/$info"
  if {$CODE != 0} {
    return "error: /artifact/$info $CODE $RESULT"
  }
  set CODE [regexp {<pre>(.*)</pre>} $RESULT match pre]
  if {$CODE == 0} {
    return "error: No pre block in /artifact/$info"
  }
  set CODE [regexp -line {^N (.*)$} $pre match mimetype]
  if {$CODE == 0} {
    return "text/x-fossil-wiki"
  }
  return $mimetype
}


###############################################################################
# Initially there should be no wiki entries
fossil wiki list
test wiki-0 {[normalize_result] eq {}}

###############################################################################
# Adding an entry should add it to the wiki list
write_file f1 "first wiki note"
fossil wiki create tcltest f1
test wiki-1 {$CODE == 0}
fossil wiki list
test wiki-2 {[normalize_result] eq {tcltest}}

###############################################################################
# Trying to add the same entry should fail
fossil wiki create tcltest f1 -expectError
test wiki-3 {$CODE != 0}

###############################################################################
# Exporting the wiki page should give back similar text
fossil wiki export tcltest a1
test wiki-4 {[similar_file f1 a1]}

###############################################################################
# Committing a change to an existing page should replace the page on export
write_file f2 "second version of the page"
fossil wiki commit tcltest f2
test wiki-5 {$CODE == 0}
fossil wiki export tcltest a2
test wiki-6 {[similar_file f2 a2]}

###############################################################################
# But we shouldn't be able to update non-existent pages
fossil wiki commit doesntexist f1 -expectError
test wiki-7 {$CODE != 0}

###############################################################################
# There shouldn't be any tech notes at this point
fossil wiki list --technote
test wiki-8 {[normalize_result] eq {}}

###############################################################################
# Creating a tech note with a specified timestamp should add a technote
write_file f3 "A technote"
fossil wiki create technote f3 --technote {2016-01-01 12:34}
test wiki-9 {$CODE == 0}
fossil wiki list --technote
test wiki-10 {[normalize_result] eq {2016-01-01 12:34:00}}
fossil wiki list --technote --show-technote-ids
set technotelist [split $RESULT "\n"]
set veryfirsttechnoteid [lindex [split [lindex $technotelist 0]] 0]

###############################################################################
# Exporting that technote should give back similar text
fossil wiki export a3 --technote {2016-01-01 12:34:00}
test wiki-11 {[similar_file f3 a3]}

###############################################################################
# Trying to add a technote with the same timestamp should succeed and create a
# second tech note
fossil wiki create 2ndnote f3 --technote {2016-01-01 12:34}
test wiki-13 {$CODE == 0}
fossil wiki list --technote
set technotelist [split $RESULT "\n"] 
test wiki-13.1 {[llength $technotelist] == 2}

###############################################################################
# Committing a change to an existing technote should replace the page on
# export (this should update the tech note from wiki-13, as that is the most
# recently updated one, and it should also be the one exported by the export
# command)
write_file f4 "technote 2nd variant"
fossil wiki commit technote f4 --technote {2016-01-01 12:34}
test wiki-14 {$CODE == 0}
fossil wiki export a4 --technote {2016-01-01 12:34}
test wiki-15 {[similar_file f4 a4]}
# Also check that the tech note with the same timestamp, but modified less
# recently still has its original text
fossil wiki export a4.1 --technote $veryfirsttechnoteid
test wiki-15.1 {[similar_file f3 a4.1]}

###############################################################################
# But we shouldn't be able to update non-existent pages
fossil wiki commit doesntexist f1 -expectError
test wiki-16 {$CODE != 0}

###############################################################################
# Check that specifying tags for a technote is OK
write_file f5 "technote with tags"
fossil wiki create {tagged technote} f5 --technote {2016-01-02 12:34} --technote-tags {A B}
test wiki-17 {$CODE == 0}
write_file f5.1 "edited and tagged technote"
fossil wiki commit {tagged technote} f5.1 --technote {2016-01-02 12:34} --technote-tags {C D}
test wiki-18 {$CODE == 0}

###############################################################################
# Check that specifying a bgcolor for a technote is OK
write_file f6 "bgcolored technote"
fossil wiki create bgcolor f6 --technote {2016-01-03 12:34} --technote-bgcolor red
test wiki-19 {$CODE == 0}
write_file f6.1 "edited technote with a background color"
fossil wiki commit bgcolor f6.1 --technote {2016-01-03 12:34} --technote-bgcolor yellow
test wiki-20 {$CODE == 0}

###############################################################################
# Test adding an attachment to both a non-existent (should fail) and an
# existing wiki page
write_file fa "This is a file to be attached"
fossil attachment add doesntexist fa -expectError
test wiki-21 {$CODE != 0}
fossil attachment add tcltest fa
test wiki-22 {$CODE == 0}

###############################################################################
# Test adding an attachment to both a non-existent (should fail) and an
# existing tech note
fossil attachment add fa --technote {2016-07-22 12:00} -expectError
test wiki-23 {$CODE != 0}
fossil attachment add fa --technote {2016-01-03 12:34}
test wiki-24 {$CODE == 0}

###############################################################################
# Check that a wiki page with an attachment can be updated
fossil wiki commit tcltest f1
test wiki-25 {$CODE == 0}

###############################################################################
# Check that a technote with an attachment can be updated
fossil wiki commit technote f6 --technote {2016-01-03 12:34}
test wiki-26 {$CODE == 0}
fossil wiki commit technote f6 --technote {2016-01-03 12:34} --technote-tags {E F}
test wiki-27 {$CODE == 0}
fossil wiki commit technote f6 --technote {2016-01-03 12:34} --technote-bgcolor blue
test wiki-28 {$CODE == 0}

###############################################################################
# Check longest form of timestamp for the technote
write_file f7 "Different timestamps"
fossil wiki create technotenow f7 --technote {2016-01-04 12:34:56+00:00}
test wiki-29 {$CODE == 0}

###############################################################################
# Check a technote appears on the timeline
write_file f8 "Contents of a 'unique' tech note"
fossil wiki create {Unique technote} f8 --technote {2016-01-05 01:02:03}
fossil timeline
test wiki-30 {[string match *Unique*technote* $RESULT]}

###############################################################################
# Check for a collision between an attachment and a note; this was a
# bug that resulted from some code treating the attachment entry as if
# it were a technote when it isn't really.
#
# First, wait for the top of the next second so the attachment
# happens at a known time, then add an attachment to an existing note
# and a new note immediately after.

set t0 [clock seconds]
while {$t0 == [clock seconds]} {
  after 100
}
set t1 [clock format [clock seconds] -gmt 1 -format "%Y-%m-%d %H:%M:%S"]
write_file f9 "Timestamp: $t1"
fossil attachment add f9 --technote {2016-01-05 01:02:03}
test wiki-31 {$CODE == 0}
fossil wiki create {Attachment collision} f9 --technote now
test wiki-32 {$CODE == 0}
#
# Now waste time until the next second so that the remaining tests
# don't have to worry about a potential collision
set t0 [clock seconds]
while {$t0 == [clock seconds]} {
  after 100
}

###############################################################################
# Check that a technote with no timestamp cannot be created, but that
# "now" is a valid stamp.
set t2 [clock format [clock seconds] -gmt 1 -format "%Y-%m-%d %H:%M:%S"]
write_file f10 "Even unstamped notes are delivered.\nStamped $t2"
fossil wiki create "Unstamped Note" f10 --technote -expectError
test wiki-33 {$CODE != 0}
fossil wiki create "Unstamped Note" f10 --technote now
test wiki-34 {$CODE == 0}
fossil wiki list -t 
test wiki-35 {[string match "*$t2*" $RESULT]}

###############################################################################
# Check that an attachment to that note in the same second works.
write_file f11 "Time Stamp was $t2"
fossil attachment add f11 --technote $t2
test wiki-36 {$CODE == 0}
fossil timeline
test wiki-36-1 {$CODE == 0}
fossil wiki list -t
test wiki-36-2 {$CODE == 0}

###############################################################################
# Check that we have the expected number of tech notes in the list, and no
# extras from other events such as the attachments: 8 tech notes are
# expected, created by tests 9, 13, 17, 19, 29, 31, 32 and 34
fossil wiki list --technote
set technotelist [split $RESULT "\n"] 
test wiki-37 {[llength $technotelist] == 8}

###############################################################################
# Check that using --show-technote-ids shows the same tech notes in the same
# order (with the technote id as the first word of each line)
fossil wiki list --technote --show-technote-ids
set technoteidlist [split $RESULT "\n"]
test wiki-38 {[llength $technoteidlist] == 8}
for {set i 0} {$i < [llength $technotelist]} {incr i} {
  set match "???????????????????????????????????????? "
  append match [lindex $technotelist $i]
  test "wiki-39-$i" {[string match $match [lindex $technoteidlist $i]]}
}

###############################################################################
# Create new tech note with a old timestamp so that it is oldest and then check that
# the contents of the oldest tech note (by tech note id, both full and short) match up
write_file f12 "A really old tech note"
fossil wiki create {Old tech note} f12 --technote {2001-07-07 09:08:07}
fossil wiki list --technote --show-technote-ids
set technotelist [split $RESULT "\n"]
set anoldtechnoteid [lindex [split [lindex $technotelist [llength $technotelist]-1]] 0]
fossil wiki export a12 --technote $anoldtechnoteid
test wiki-40 {[similar_file f12 a12]}

###############################################################################
# Also check that we can specify a prefix of the tech note id.  (Note: with
# 9 items in the tech note list at this point there is a chance of a
# collision.  However, with a 21-character prefix the chance of a collision
# is far below 1 in 10^22, so this test ignores that possibility.)
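# (Back of the envelope: a 21-character hex prefix has 16^21, about 1.9e25,
# possible values; 9 ids form C(9,2) = 36 pairs, so the chance of any
# collision is roughly 36/1.9e25, i.e. about 2e-24.)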
fossil wiki export a12.1 --technote [string range $anoldtechnoteid 0 20]
test wiki-41 {[similar_file f12 a12.1]}

###############################################################################
# Now force a collision in the first four characters of the tech note id,
# if we don't already have one, so we can check that an ambiguous tech note
# id produces an error
set idcounts [dict create]
set maxcount 0
fossil wiki list --technote --show-technote-ids
set technotelist [split $RESULT "\n"]
for {set i 0} {$i < [llength $technotelist]} {incr i} {
  set fullid [lindex $technotelist $i]
  set id [string range $fullid 0 3]
  dict incr idcounts $id
  if {[dict get $idcounts $id] > $maxcount} {
    set maxid $id
    incr maxcount 
  }
}
# Start i so that, as a Julian day number, it falls in the 1800s, i.e. older
# than any other tech note, but after 1 AD
set i 2400000
while {$maxcount < 2} {
  # keep getting older
  incr i -1
  write_file f13 "A tech note with timestamp of jday=$i"
  fossil wiki create "timestamp of $i" f13 --technote "$i"
  fossil wiki list --technote --show-technote-ids
  set technotelist [split $RESULT "\n"]
  set oldesttechnoteid [lindex [split [lindex $technotelist [llength $technotelist]-1]] 0]
  set id [string range $oldesttechnoteid 0 3]
  dict incr idcounts $id
  if {[dict get $idcounts $id] > $maxcount} {
    set maxid $id
    incr maxcount 
  }
}
# Save the duplicate id for this and later tests
set duplicateid $maxid
fossil wiki export a13 --technote $duplicateid -expectError
test wiki-42 {$CODE != 0}

###############################################################################
# Check we can update a technote by its id
write_file f14 "Updated text for the really old tech note"
fossil wiki commit {Old tech note} f14 --technote $anoldtechnoteid
fossil wiki export a14 --technote $anoldtechnoteid
test wiki-43 {[similar_file f14 a14]}

###############################################################################
# Check we can add attachments to a technote by its id
fossil attachment add fa --technote $anoldtechnoteid
test wiki-44 {$CODE == 0}

###############################################################################
# Also check that we can specify a prefix of the tech note id
write_file f15 "Updated text for the really old tech note specified by its id"
fossil wiki commit {Old tech note} f15 --technote [string range $anoldtechnoteid 0 20]
fossil wiki export a15 --technote $anoldtechnoteid
test wiki-45 {[similar_file f15 a15]}

###############################################################################
# Check we can add attachments to a technote by a prefix of its id
fossil attachment add fa --technote [string range $anoldtechnoteid 0 20]
test wiki-46 {$CODE == 0}

###############################################################################
# And we get an error for the ambiguous tech note id
fossil wiki commit {Old tech note} f15 --technote $duplicateid -expectError
test wiki-47 {$CODE != 0}
fossil attachment add fa --technote $duplicateid -expectError
test wiki-48 {$CODE != 0}

###############################################################################
# Check the default mimetype is text/x-fossil-wiki
test wiki-49 {[get_mime_type tcltest] == "text/x-fossil-wiki"}

###############################################################################
# Check that the long forms of the mimetypes are recorded correctly
fossil wiki create tcltest-x-fossil f1 -mimetype text/x-fossil-wiki
test wiki-50 {[get_mime_type tcltest-x-fossil] == "text/x-fossil-wiki"}
fossil wiki create tcltest-x-markdown f1 -mimetype text/x-markdown
test wiki-51 {[get_mime_type tcltest-x-markdown] == "text/x-markdown"}
fossil wiki create tcltest-plain f1 -mimetype text/plain
test wiki-52 {[get_mime_type tcltest-plain] == "text/plain"}
fossil wiki create tcltest-x-random f1 -mimetype text/x-random
test wiki-53 {[get_mime_type tcltest-x-random] == "text/x-fossil-wiki"}

###############################################################################
# Check that the short forms of the mimetypes are recorded correctly
fossil wiki create tcltest-x-fossil-short f1 -mimetype wiki
test wiki-54 {[get_mime_type tcltest-x-fossil-short] == "text/x-fossil-wiki"}
fossil wiki create tcltest-x-markdown-short f1 -mimetype markdown
test wiki-55 {[get_mime_type tcltest-x-markdown-short] == "text/x-markdown"}
fossil wiki create tcltest-plain-short f1 -mimetype plain
test wiki-56 {[get_mime_type tcltest-plain-short] == "text/plain"}
fossil wiki create tcltest-x-random-short f1 -mimetype random
test wiki-57 {[get_mime_type tcltest-x-random-short] == "text/x-fossil-wiki"}


###############################################################################
test_cleanup

Added www/encryptedrepos.wiki.

<title>How To Use Encrypted Repositories</title>
<h2>Introduction</h2><blockquote>
Fossil can be compiled so that it works with encrypted repositories using
the [https://www.sqlite.org/see/doc/trunk/www/readme.wiki|SQLite Encryption Extension].
This technical note explains the process.
</blockquote>
<h2>Building An Encryption-Enabled Fossil</h2><blockquote>
The SQLite Encryption Extension (SEE) is proprietary software and requires 
[http://www.hwaci.com/cgi-bin/see-step1|purchasing a license].
<p>
Assuming you have an SEE license, the first step of compiling Fossil to
use SEE is to create an SEE-enabled version of the SQLite database source code.
This alternative SQLite database source file should be called "sqlite3-see.c"
and should be placed in the src/ subfolder of the Fossil sources, right beside
the public-domain "sqlite3.c" source file.
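<p>
The exact procedure for creating "sqlite3-see.c" depends on your SEE
distribution; as a sketch only (the "see.c" file name is an assumption,
so check the README that comes with your SEE license), the usual recipe
is to append the SEE sources to the end of the public amalgamation:
<blockquote><pre>
cat sqlite3.c see.c >sqlite3-see.c
</pre></blockquote>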
<p>
To enable the use of SEE on Unix-like systems, add the --with-see
command-line option to the configure script:
<blockquote><pre>
./configure --with-see; make
</pre></blockquote>
<p>To build for Windows using MSVC, add
the "USE_SEE=1" argument to the "nmake" command line.
<blockquote><pre>
nmake -f makefile.msc USE_SEE=1
</pre></blockquote>
</blockquote>
<h2>Using Encrypted Repositories</h2><blockquote>
Any Fossil repository whose filename ends with ".efossil" is taken to be
an encrypted repository.  Fossil will prompt for the encryption password and
attempt to open the repository database using that password.
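<p>
For example (the repository name here is hypothetical; any filename
ending in ".efossil" behaves the same way):
<blockquote><pre>
fossil clone https://www.example.org/myproject myproject.efossil
fossil open myproject.efossil
</pre></blockquote>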
<p>
Every use of an encrypted repository requires retyping the encryption password.
<p>
On Windows, the "fossil server" and "fossil ui" commands do not
(currently) work on an encrypted repository.
</blockquote>