Fossil

Check-in [abd131b8]

Overview
Comment: Merge updates from trunk.
SHA1: abd131b83c2a37c583cbff17a298e03e39ab04f0
User & Date: mistachkin 2016-03-06 06:26:31.961
Context
2016-03-11 23:38  Fix compilation issues cause by the trunk merge. ... (check-in: 12453740 user: mistachkin tags: mvHardDirFix)
2016-03-06 06:26  Merge updates from trunk. ... (check-in: abd131b8 user: mistachkin tags: mvHardDirFix)
2016-03-05 20:01  Added add_content_sql_commands() to /admin_sql, as per ML discussion. ... (check-in: 93f514ca user: stephan tags: trunk)
2015-07-29 18:44  Candidate fix for directory renaming issue with the --hard option as reported via the mailing list. ... (check-in: b86127e1 user: mistachkin tags: mvHardDirFix)
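
For context, the mvHardDirFix branch carries a candidate fix for how "fossil mv" handles directory renames when the --hard option is used, i.e. when the files in the checkout are moved on disk as well as being renamed in the repository. A minimal sketch of the kind of operation involved, using hypothetical directory names:

        fossil mv --hard olddir newdir
        fossil commit -m "Rename olddir to newdir"
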
Changes
Changes to Dockerfile.
Old version (lines 1-26):
###
#   Dockerfile for Fossil
###
FROM fedora:21

### Now install some additional parts we will need for the build
RUN yum update -y && yum install -y gcc make zlib-devel openssl-devel tar && yum clean all && groupadd -r fossil -g 433 && useradd -u 431 -r -g fossil -d /opt/fossil -s /sbin/nologin -c "Fossil user" fossil

### If you want to build "release", change the next line accordingly.
ENV FOSSIL_INSTALL_VERSION trunk

RUN curl "http://core.tcl.tk/tcl/tarball/tcl-src.tar.gz?name=tcl-src&uuid=release" | tar zx
RUN cd tcl-src/unix && ./configure --prefix=/usr --disable-shared --disable-threads --disable-load && make && make install
RUN curl "http://www.fossil-scm.org/index.html/tarball/fossil-src.tar.gz?name=fossil-src&uuid=${FOSSIL_INSTALL_VERSION}" | tar zx
RUN cd fossil-src && ./configure --disable-fusefs --json --with-th1-docs --with-th1-hooks --with-tcl

RUN cd fossil-src && make && strip fossil && cp fossil /usr/bin && cd .. && rm -rf fossil-src && chmod a+rx /usr/bin/fossil && mkdir -p /opt/fossil && chown fossil:fossil /opt/fossil

### Build is done, remove modules no longer needed
RUN yum remove -y gcc make zlib-devel openssl-devel tar && yum clean all

USER fossil

ENV HOME /opt/fossil

EXPOSE 8080

New version (lines 1-27):
###
#   Dockerfile for Fossil
###
FROM fedora:23

### Now install some additional parts we will need for the build
RUN dnf update -y && dnf install -y gcc make zlib-devel openssl-devel tar && dnf clean all && groupadd -r fossil -g 433 && useradd -u 431 -r -g fossil -d /opt/fossil -s /sbin/nologin -c "Fossil user" fossil

### If you want to build "trunk", change the next line accordingly.
ENV FOSSIL_INSTALL_VERSION release

RUN curl "http://core.tcl.tk/tcl/tarball/tcl-src.tar.gz?name=tcl-src&uuid=release" | tar zx
RUN cd tcl-src/unix && ./configure --prefix=/usr --disable-load && make && make install
RUN curl "http://www.fossil-scm.org/index.html/tarball/fossil-src.tar.gz?name=fossil-src&uuid=${FOSSIL_INSTALL_VERSION}" | tar zx
RUN cd fossil-src && ./configure --disable-fusefs --json --with-th1-docs --with-th1-hooks --with-tcl --with-tcl-stubs --with-tcl-private-stubs
RUN cd fossil-src/src && mv main.c main.c.orig && sed s/\"now\"/0/ <main.c.orig >main.c
RUN cd fossil-src && make && strip fossil && cp fossil /usr/bin && cd .. && rm -rf fossil-src && chmod a+rx /usr/bin/fossil && mkdir -p /opt/fossil && chown fossil:fossil /opt/fossil

### Build is done, remove modules no longer needed
RUN dnf remove -y gcc make zlib-devel openssl-devel tar && dnf clean all

USER fossil

ENV HOME /opt/fossil

EXPOSE 8080

Changes to Makefile.classic.
Old version (lines 50-64):
TCC += -DFOSSIL_DYNAMIC_BUILD=1

#### Extra arguments for linking the finished binary.  Fossil needs
#    to link against the Z-Lib compression library unless the miniz
#    library in the source tree is being used.  There are no other
#    required dependencies.
ZLIB_LIB.0 = -lz
ZLIB_LIB.1 = 
ZLIB_LIB.  = $(ZLIB_LIB.0)

# If using zlib:
LIB += $(ZLIB_LIB.$(FOSSIL_ENABLE_MINIZ)) $(LDFLAGS)

# If using HTTPS:
LIB += -lcrypto -lssl

New version (lines 50-64):
TCC += -DFOSSIL_DYNAMIC_BUILD=1

#### Extra arguments for linking the finished binary.  Fossil needs
#    to link against the Z-Lib compression library unless the miniz
#    library in the source tree is being used.  There are no other
#    required dependencies.
ZLIB_LIB.0 = -lz
ZLIB_LIB.1 =
ZLIB_LIB.  = $(ZLIB_LIB.0)

# If using zlib:
LIB += $(ZLIB_LIB.$(FOSSIL_ENABLE_MINIZ)) $(LDFLAGS)

# If using HTTPS:
LIB += -lcrypto -lssl
Changes to Makefile.in.
Old version (lines 38-50):
#
TCLSH = tclsh

LIB =	@LDFLAGS@ @EXTRA_LDFLAGS@ @LIBS@
TCCFLAGS =	@EXTRA_CFLAGS@ @CPPFLAGS@ @CFLAGS@ -DHAVE_AUTOCONFIG_H -D_HAVE_SQLITE_CONFIG_H
INSTALLDIR = $(DESTDIR)@prefix@/bin
USE_SYSTEM_SQLITE = @USE_SYSTEM_SQLITE@

FOSSIL_ENABLE_MINIZ = @FOSSIL_ENABLE_MINIZ@

include $(SRCDIR)/main.mk

distclean: clean
	rm -f autoconfig.h config.log Makefile


New version (lines 38-51):
#
TCLSH = tclsh

LIB =	@LDFLAGS@ @EXTRA_LDFLAGS@ @LIBS@
TCCFLAGS =	@EXTRA_CFLAGS@ @CPPFLAGS@ @CFLAGS@ -DHAVE_AUTOCONFIG_H -D_HAVE_SQLITE_CONFIG_H
INSTALLDIR = $(DESTDIR)@prefix@/bin
USE_SYSTEM_SQLITE = @USE_SYSTEM_SQLITE@
USE_LINENOISE = @USE_LINENOISE@
FOSSIL_ENABLE_MINIZ = @FOSSIL_ENABLE_MINIZ@

include $(SRCDIR)/main.mk

distclean: clean
	rm -f autoconfig.h config.log Makefile
Changes to VERSION.
Old version (line 1):
1.33

New version (line 1):
1.35
Changes to ajax/i-test/rhino-test.js.
Old version (lines 40-54):
        if(!TestApp.verbose) return;
        print("ERROR: "+WhAjaj.stringify(opt));
    };
    cb.onResponse = function(resp,req){
        if(!TestApp.verbose) return;
        print("GOT RESPONSE: "+(('string'===typeof resp) ? resp : WhAjaj.stringify(resp)));
    };
    
})();

/**
    Throws an exception of cond is a falsy value.
*/
function assert(cond, descr){
    descr = descr || "Undescribed condition.";

New version (lines 40-54):
        if(!TestApp.verbose) return;
        print("ERROR: "+WhAjaj.stringify(opt));
    };
    cb.onResponse = function(resp,req){
        if(!TestApp.verbose) return;
        print("GOT RESPONSE: "+(('string'===typeof resp) ? resp : WhAjaj.stringify(resp)));
    };

})();

/**
    Throws an exception of cond is a falsy value.
*/
function assert(cond, descr){
    descr = descr || "Undescribed condition.";
Old version (lines 125-139):
}
testHAI.description = 'Get server version info.';

function testIAmNobody(){
    TestApp.fossil.whoami('/json/whoami');
    assert('nobody' === TestApp.fossil.auth.name, 'User == nobody.' );
    assert(!TestApp.fossil.auth.authToken, 'authToken is not set.' );
   
}
testIAmNobody.description = 'Ensure that current user is "nobody".';


function testAnonymousLogin(){
    TestApp.fossil.login();
    assert('string' === typeof TestApp.fossil.auth.authToken, 'authToken = '+TestApp.fossil.auth.authToken);

New version (lines 125-139):
}
testHAI.description = 'Get server version info.';

function testIAmNobody(){
    TestApp.fossil.whoami('/json/whoami');
    assert('nobody' === TestApp.fossil.auth.name, 'User == nobody.' );
    assert(!TestApp.fossil.auth.authToken, 'authToken is not set.' );

}
testIAmNobody.description = 'Ensure that current user is "nobody".';


function testAnonymousLogin(){
    TestApp.fossil.login();
    assert('string' === typeof TestApp.fossil.auth.authToken, 'authToken = '+TestApp.fossil.auth.authToken);
Old version (lines 219-233):
    osb.write(json,0, json.length);
    osb.close();
    req = json = outs = osr = osb = undefined;
    var ins = p.getInputStream();
    var isr = new java.io.InputStreamReader(ins);
    var br = new java.io.BufferedReader(isr);
    var line;
    
    while( null !== (line=br.readLine())){
        print(line);
    }
    br.close();
    isr.close();
    ins.close();
    p.waitFor();

New version (lines 219-233):
    osb.write(json,0, json.length);
    osb.close();
    req = json = outs = osr = osb = undefined;
    var ins = p.getInputStream();
    var isr = new java.io.InputStreamReader(ins);
    var br = new java.io.BufferedReader(isr);
    var line;

    while( null !== (line=br.readLine())){
        print(line);
    }
    br.close();
    isr.close();
    ins.close();
    p.waitFor();
Changes to ajax/index.html.
Old version (lines 1-22):
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
	"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">

<head>
	<title>Fossil/JSON raw request sending</title>
	<meta http-equiv="content-type" content="text/html;charset=utf-8" />
    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1.6.4/jquery.min.js"></script> 
    <script type="text/javascript" src="js/whajaj.js"></script>
    <script type="text/javascript" src="js/fossil-ajaj.js"></script>

<style type='text/css'>
th {
  text-align: left;
  background-color: #ececec;  
}

.dangerWillRobinson {
    background-color: yellow;
}
</style>

New version (lines 1-22):
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
	"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">

<head>
	<title>Fossil/JSON raw request sending</title>
	<meta http-equiv="content-type" content="text/html;charset=utf-8" />
    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1.6.4/jquery.min.js"></script>
    <script type="text/javascript" src="js/whajaj.js"></script>
    <script type="text/javascript" src="js/fossil-ajaj.js"></script>

<style type='text/css'>
th {
  text-align: left;
  background-color: #ececec;
}

.dangerWillRobinson {
    background-color: yellow;
}
</style>

Changes to ajax/wiki-editor.html.
Old version (lines 1-22):
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
	"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">

<head>
	<title>Fossil/JSON Wiki Editor Prototype</title>
	<meta http-equiv="content-type" content="text/html;charset=utf-8" />
    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1.4.2/jquery.min.js"></script> 
    <script type="text/javascript" src="js/whajaj.js"></script>
    <script type="text/javascript" src="js/fossil-ajaj.js"></script>

<style type='text/css'>
th {
  text-align: left;
  background-color: #ececec;  
}

.dangerWillRobinson {
    background-color: yellow;
}

.wikiPageLink {

New version (lines 1-22):
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
	"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">

<head>
	<title>Fossil/JSON Wiki Editor Prototype</title>
	<meta http-equiv="content-type" content="text/html;charset=utf-8" />
    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1.4.2/jquery.min.js"></script>
    <script type="text/javascript" src="js/whajaj.js"></script>
    <script type="text/javascript" src="js/fossil-ajaj.js"></script>

<style type='text/css'>
th {
  text-align: left;
  background-color: #ececec;
}

.dangerWillRobinson {
    background-color: yellow;
}

.wikiPageLink {
Old version (lines 213-227):
    TheApp.refreshPageListView = function(){
        var list = (function(){
            var k, v, li = [];
            for( k in TheApp.pages ){
                if(!TheApp.pages.hasOwnProperty(k)) continue;
                li.push(k);
            }
            return li;                
        })();
        var i, p, a, tgt = TheApp.jqe.pageListArea;
        tgt.text('');
        function makeLink(name){
            var link = jQuery('<span></span>');
            link.text(name);
            link.addClass('wikiPageLink');

New version (lines 213-227):
    TheApp.refreshPageListView = function(){
        var list = (function(){
            var k, v, li = [];
            for( k in TheApp.pages ){
                if(!TheApp.pages.hasOwnProperty(k)) continue;
                li.push(k);
            }
            return li;
        })();
        var i, p, a, tgt = TheApp.jqe.pageListArea;
        tgt.text('');
        function makeLink(name){
            var link = jQuery('<span></span>');
            link.text(name);
            link.addClass('wikiPageLink');
Old version (lines 320-334):
See also: <a href='index.html'>main test page</a>.

<br>
<b>Login:</b>
<br/>
<input type='button' value='Anon. Login' onclick='TheApp.cgi.login()' />
or: 
name:<input type='text' id='textUser' value='json-demo' size='12'/>
pw:<input type='password' id='textPassword' value='json-demo' size='12'/>
<input type='button' value='login' onclick='TheApp.cgi.login(jQuery("#textUser").val(),jQuery("#textPassword").val(),{onResponse:TheApp.onLogin})' />
<input type='button' value='logout' onclick='TheApp.cgi.logout()' />

<br/>
<span id='currentAuthToken' style='font-family:monospaced'></span>

New version (lines 320-334):
See also: <a href='index.html'>main test page</a>.

<br>
<b>Login:</b>
<br/>
<input type='button' value='Anon. Login' onclick='TheApp.cgi.login()' />
or:
name:<input type='text' id='textUser' value='json-demo' size='12'/>
pw:<input type='password' id='textPassword' value='json-demo' size='12'/>
<input type='button' value='login' onclick='TheApp.cgi.login(jQuery("#textUser").val(),jQuery("#textPassword").val(),{onResponse:TheApp.onLogin})' />
<input type='button' value='logout' onclick='TheApp.cgi.logout()' />

<br/>
<span id='currentAuthToken' style='font-family:monospaced'></span>
Changes to auto.def.
Old version (lines 1-16):
# System autoconfiguration. Try: ./configure --help

use cc cc-lib

options {
    with-openssl:path|auto|none
                         => {Look for OpenSSL in the given path, or auto or none}
    with-miniz=0         => {Use miniz from the source tree}

    with-zlib:path       => {Look for zlib in the given path}


    with-legacy-mv-rm=0  => {Enable legacy behavior for mv/rm (skip checkout files)}
    with-th1-docs=0      => {Enable TH1 for embedded documentation pages}
    with-th1-hooks=0     => {Enable TH1 hooks for commands and web pages}
    with-tcl:path        => {Enable Tcl integration, with Tcl in the specified path}
    with-tcl-stubs=0     => {Enable Tcl integration via stubs library mechanism}
    with-tcl-private-stubs=0
                         => {Enable Tcl integration via private stubs mechanism}

New version (lines 1-19):
# System autoconfiguration. Try: ./configure --help

use cc cc-lib

options {
    with-openssl:path|auto|tree|none
                         => {Look for OpenSSL in the given path, automatically, in the source tree, or none}
    with-miniz=0         => {Use miniz from the source tree}
    with-zlib:path|auto|tree
                         => {Look for zlib in the given path, automatically, or in the source tree}
    with-exec-rel-paths=0
                         => {Enable relative paths for external diff/gdiff}
    with-legacy-mv-rm=0  => {Enable legacy behavior for mv/rm (skip checkout files)}
    with-th1-docs=0      => {Enable TH1 for embedded documentation pages}
    with-th1-hooks=0     => {Enable TH1 hooks for commands and web pages}
    with-tcl:path        => {Enable Tcl integration, with Tcl in the specified path}
    with-tcl-stubs=0     => {Enable Tcl integration via stubs library mechanism}
    with-tcl-private-stubs=0
                         => {Enable Tcl integration via private stubs mechanism}
Old version (lines 31-77):
# Find tclsh for the test suite. Can't yet use jimsh for this.
cc-check-progs tclsh

define EXTRA_CFLAGS ""
define EXTRA_LDFLAGS ""
define USE_SYSTEM_SQLITE 0

if {![opt-bool internal-sqlite]} {
  proc find_internal_sqlite {} {

    # On some systems (slackware), libsqlite3 requires -ldl to link. So
    # search for the system SQLite once with -ldl, and once without. If
    # the library can only be found with $extralibs set to -ldl, then
    # the code below will append -ldl to LIBS.
    #
    foreach extralibs {{} {-ldl}} {

      # Locate the system SQLite by searching for sqlite3_open(). Then check
      # if sqlite3_strglob() can be found as well. If we can find open() but
      # not strglob(), then the system SQLite is too old to link against
      # fossil.
      #
      if {[cc-check-function-in-lib sqlite3_open sqlite3 $extralibs]} {
        if {![cc-check-function-in-lib sqlite3_malloc64 sqlite3 $extralibs]} {
          user-error "system sqlite3 too old (require >= 3.8.7)"
        }

        # Success. Update symbols and return.
        #
        define USE_SYSTEM_SQLITE 1

        define-append LIBS $extralibs
        return
      }
    }
    user-error "system sqlite3 not found"
  }

  find_internal_sqlite
}

if {[string match *-solaris* [get-define host]]} {
    define-append EXTRA_CFLAGS {-D_XOPEN_SOURCE=500 -D__EXTENSIONS__}
}

if {[opt-bool fossil-debug]} {
    define-append EXTRA_CFLAGS -DFOSSIL_DEBUG

New version (lines 34-134):
# Find tclsh for the test suite. Can't yet use jimsh for this.
cc-check-progs tclsh

define EXTRA_CFLAGS ""
define EXTRA_LDFLAGS ""
define USE_SYSTEM_SQLITE 0
define USE_LINENOISE 0
define FOSSIL_ENABLE_MINIZ 0

# This procedure is a customized version of "cc-check-function-in-lib",
# that does not modify the LIBS variable.  Its use prevents prematurely
# pulling in libraries that will be added later anyhow (e.g. "-ldl").
proc check-function-in-lib {function libs {otherlibs {}}} {
    if {[string length $otherlibs]} {
        msg-checking "Checking for $function in $libs with $otherlibs..."
    } else {
        msg-checking "Checking for $function in $libs..."
    }
    set found 0
    cc-with [list -libs $otherlibs] {
        if {[cctest_function $function]} {
            msg-result "none needed"
            define lib_$function ""
            incr found
        } else {
            foreach lib $libs {
                cc-with [list -libs -l$lib] {
                    if {[cctest_function $function]} {
                        msg-result -l$lib
                        define lib_$function -l$lib
                        incr found
                        break
                    }
                }
            }
        }
    }
    if {$found} {
        define [feature-define-name $function]
    } else {
        msg-result "no"
    }
    return $found
}

if {![opt-bool internal-sqlite]} {
  proc find_internal_sqlite {} {

    # On some systems (slackware), libsqlite3 requires -ldl to link. So
    # search for the system SQLite once with -ldl, and once without. If
    # the library can only be found with $extralibs set to -ldl, then
    # the code below will append -ldl to LIBS.
    #
    foreach extralibs {{} {-ldl}} {

      # Locate the system SQLite by searching for sqlite3_open(). Then check
      # if sqlite3_strlike() can be found as well. If we can find open() but
      # not strlike(), then the system SQLite is too old to link against
      # fossil.
      #
      if {[check-function-in-lib sqlite3_open sqlite3 $extralibs]} {
        if {![check-function-in-lib sqlite3_strlike sqlite3 $extralibs]} {
          user-error "system sqlite3 too old (require >= 3.10.0)"
        }

        # Success. Update symbols and return.
        #
        define USE_SYSTEM_SQLITE 1
        define-append LIBS -lsqlite3
        define-append LIBS $extralibs
        return
      }
    }
    user-error "system sqlite3 not found"
  }

  find_internal_sqlite
}

proc is_mingw {} {
    return [string match *mingw* [get-define host]]
}

if {[is_mingw]} {
    define-append EXTRA_CFLAGS -DBROKEN_MINGW_CMDLINE
    define-append LIBS -lkernel32 -lws2_32
} else {
    #
    # NOTE: All platforms except MinGW should use the linenoise
    #       package.  It is currently unsupported on Win32.
    #
    define USE_LINENOISE 1
}

if {[string match *-solaris* [get-define host]]} {
    define-append EXTRA_CFLAGS {-D_XOPEN_SOURCE=500 -D__EXTENSIONS__}
}

if {[opt-bool fossil-debug]} {
    define-append EXTRA_CFLAGS -DFOSSIL_DEBUG
Old version (lines 89-102):
}

if {[opt-bool with-legacy-mv-rm]} {
    define-append EXTRA_CFLAGS -DFOSSIL_ENABLE_LEGACY_MV_RM
    define FOSSIL_ENABLE_LEGACY_MV_RM
    msg-result "Legacy mv/rm support enabled"
}







if {[opt-bool with-th1-docs]} {
    define-append EXTRA_CFLAGS -DFOSSIL_ENABLE_TH1_DOCS
    define FOSSIL_ENABLE_TH1_DOCS
    msg-result "TH1 embedded documentation support enabled"
}


New version (lines 146-165):
}

if {[opt-bool with-legacy-mv-rm]} {
    define-append EXTRA_CFLAGS -DFOSSIL_ENABLE_LEGACY_MV_RM
    define FOSSIL_ENABLE_LEGACY_MV_RM
    msg-result "Legacy mv/rm support enabled"
}

if {[opt-bool with-exec-rel-paths]} {
    define-append EXTRA_CFLAGS -DFOSSIL_ENABLE_EXEC_REL_PATHS
    define FOSSIL_ENABLE_EXEC_REL_PATHS
    msg-result "Relative paths in external diff/gdiff enabled"
}

if {[opt-bool with-th1-docs]} {
    define-append EXTRA_CFLAGS -DFOSSIL_ENABLE_TH1_DOCS
    define FOSSIL_ENABLE_TH1_DOCS
    msg-result "TH1 embedded documentation support enabled"
}

Old version (lines 115-144):
    # XXX: This will not work on all systems.
    define-append EXTRA_LDFLAGS -static
    msg-result "Trying to link statically"
} else {
    define-append EXTRA_CFLAGS -DFOSSIL_DYNAMIC_BUILD=1
    define FOSSIL_DYNAMIC_BUILD
}

set tclpath [opt-val with-tcl]
if {$tclpath ne ""} {
    set tclprivatestubs [opt-bool with-tcl-private-stubs]
    # Note parse-tclconfig-sh is in autosetup/local.tcl
    if {$tclpath eq "1"} {

        if {$tclprivatestubs} {
            set tclconfig(TCL_INCLUDE_SPEC) -Icompat/tcl-8.6/generic
            set tclconfig(TCL_VERSION) {Private Stubs}
            set tclconfig(TCL_PATCH_LEVEL) {}
            set tclconfig(TCL_PREFIX) {compat/tcl-8.6}
            set tclconfig(TCL_LD_FLAGS) { }
        } else {
            # Use the system Tcl. Look in some likely places.
            array set tclconfig [parse-tclconfig-sh \
                compat/tcl-8.6/unix compat/tcl-8.6/win \
                /usr /usr/local /usr/share /opt/local]
            set msg "on your system"
        }
    } else {
        array set tclconfig [parse-tclconfig-sh $tclpath]
        set msg "at $tclpath"
    }

New version (lines 178-351):
    # XXX: This will not work on all systems.
    define-append EXTRA_LDFLAGS -static
    msg-result "Trying to link statically"
} else {
    define-append EXTRA_CFLAGS -DFOSSIL_DYNAMIC_BUILD=1
    define FOSSIL_DYNAMIC_BUILD
}

# Helper for OpenSSL checking
proc check-for-openssl {msg {cflags {}} {libs {-lssl -lcrypto}}} {
    msg-checking "Checking for $msg..."
    set rc 0
    if {[is_mingw]} {
        lappend libs -lgdi32 -lwsock32
    }
    if {[info exists ::zlib_lib]} {
        lappend libs $::zlib_lib
    }
    msg-quiet cc-with [list -cflags $cflags -libs $libs] {
        if {[cc-check-includes openssl/ssl.h] && \
                [cc-check-functions SSL_new]} {
            incr rc
        }
    }
    if {!$rc && ![is_mingw]} {
        # On some systems, OpenSSL appears to require -ldl to link.
        lappend libs -ldl
        msg-quiet cc-with [list -cflags $cflags -libs $libs] {
            if {[cc-check-includes openssl/ssl.h] && \
                    [cc-check-functions SSL_new]} {
                incr rc
            }
        }
    }
    if {$rc} {
        msg-result "ok"
        return 1
    } else {
        msg-result "no"
        return 0
    }
}

if {[opt-bool with-miniz]} {
    define FOSSIL_ENABLE_MINIZ 1
    msg-result "Using miniz for compression"
} else {
    # Check for zlib, using the given location if specified
    set zlibpath [opt-val with-zlib]
    if {$zlibpath eq "tree"} {
        set zlibdir [file dirname $autosetup(dir)]/compat/zlib
        if {![file isdirectory $zlibdir]} {
            user-error "The zlib in source tree directory does not exist"
        }
        cc-with [list -cflags "-I$zlibdir -L$zlibdir"]
        define-append EXTRA_CFLAGS -I$zlibdir
        define-append LIBS $zlibdir/libz.a
        set ::zlib_lib $zlibdir/libz.a
        msg-result "Using zlib in source tree"
    } else {
        if {$zlibpath ni {auto ""}} {
            cc-with [list -cflags "-I$zlibpath -L$zlibpath"]
            define-append EXTRA_CFLAGS -I$zlibpath
            define-append EXTRA_LDFLAGS -L$zlibpath
            msg-result "Using zlib from $zlibpath"
        }
        if {![cc-check-includes zlib.h] || ![check-function-in-lib inflateEnd z]} {
            user-error "zlib not found please install it or specify the location with --with-zlib"
        }
        set ::zlib_lib -lz
    }
}

set ssldirs [opt-val with-openssl]
if {$ssldirs ne "none"} {
    if {[opt-bool with-miniz]} {
        user-error "The --with-miniz option is incompatible with OpenSSL"
    }
    set found 0
    if {$ssldirs eq "tree"} {
        set ssldir [file dirname $autosetup(dir)]/compat/openssl
        if {![file isdirectory $ssldir]} {
            user-error "The OpenSSL in source tree directory does not exist"
        }
        set msg "ssl in $ssldir"
        set cflags "-I$ssldir/include"
        set ldflags "-L$ssldir"
        set ssllibs "$ssldir/libssl.a $ssldir/libcrypto.a"
        set found [check-for-openssl "ssl in source tree" "$cflags $ldflags" $ssllibs]
    } else {
        if {$ssldirs in {auto ""}} {
            catch {
                set cflags [exec pkg-config openssl --cflags-only-I]
                set ldflags [exec pkg-config openssl --libs-only-L]
                set found [check-for-openssl "ssl via pkg-config" "$cflags $ldflags"]
            } msg
            if {!$found} {
                set ssldirs "{} /usr/sfw /usr/local/ssl /usr/lib/ssl /usr/ssl \
                             /usr/pkg /usr/local /usr /usr/local/opt/openssl"
            }
        }
        if {!$found} {
            foreach dir $ssldirs {
                if {$dir eq ""} {
                    set msg "system ssl"
                    set cflags ""
                    set ldflags ""
                } else {
                    set msg "ssl in $dir"
                    set cflags "-I$dir/include"
                    set ldflags "-L$dir/lib"
                }
                if {[check-for-openssl $msg "$cflags $ldflags"]} {
                    incr found
                    break
                }
            }
        }
    }
    if {$found} {
        define FOSSIL_ENABLE_SSL
        define-append EXTRA_CFLAGS $cflags
        define-append EXTRA_LDFLAGS $ldflags
        if {[info exists ssllibs]} {
            define-append LIBS $ssllibs
        } else {
            define-append LIBS -lssl -lcrypto
        }
        if {[info exists ::zlib_lib]} {
            define-append LIBS $::zlib_lib
        }
        if {[is_mingw]} {
            define-append LIBS -lgdi32 -lwsock32
        }
        msg-result "HTTPS support enabled"

        # Silence OpenSSL deprecation warnings on Mac OS X 10.7.
        if {[string match *-darwin* [get-define host]]} {
            if {[cctest -cflags {-Wdeprecated-declarations}]} {
                define-append EXTRA_CFLAGS -Wdeprecated-declarations
            }
        }
    } else {
        user-error "OpenSSL not found. Consider --with-openssl=none to disable HTTPS support"
    }
} else {
    if {[info exists ::zlib_lib]} {
        define-append LIBS $::zlib_lib
    }
}

set tclpath [opt-val with-tcl]
if {$tclpath ne ""} {
    set tclprivatestubs [opt-bool with-tcl-private-stubs]
    # Note parse-tclconfig-sh is in autosetup/local.tcl
    if {$tclpath eq "1"} {
        set tcldir [file dirname $autosetup(dir)]/compat/tcl-8.6
        if {$tclprivatestubs} {
            set tclconfig(TCL_INCLUDE_SPEC) -I$tcldir/generic
            set tclconfig(TCL_VERSION) {Private Stubs}
            set tclconfig(TCL_PATCH_LEVEL) {}
            set tclconfig(TCL_PREFIX) $tcldir
            set tclconfig(TCL_LD_FLAGS) { }
        } else {
            # Use the system Tcl. Look in some likely places.
            array set tclconfig [parse-tclconfig-sh \
                $tcldir/unix $tcldir/win \
                /usr /usr/local /usr/share /opt/local]
            set msg "on your system"
        }
    } else {
        array set tclconfig [parse-tclconfig-sh $tclpath]
        set msg "at $tclpath"
    }
Old version (lines 154-278):
        define FOSSIL_ENABLE_TCL_STUBS
        define USE_TCL_STUBS
    } else {
        set libs "$tclconfig(TCL_LIB_SPEC) $tclconfig(TCL_LIBS)"
    }
    set cflags $tclconfig(TCL_INCLUDE_SPEC)
    if {!$tclprivatestubs} {

        cc-with [list -cflags $cflags -libs $libs] {
            if {$tclstubs} {
                if {![cc-check-functions Tcl_InitStubs]} {
                    user-error "Cannot find a usable Tcl stubs library $msg"


                }
            } else {
                if {![cc-check-functions Tcl_CreateInterp]} {
                    user-error "Cannot find a usable Tcl library $msg"
                }

            }
        }
    }
    set version $tclconfig(TCL_VERSION)$tclconfig(TCL_PATCH_LEVEL)
    msg-result "Found Tcl $version at $tclconfig(TCL_PREFIX)"
    if {!$tclprivatestubs} {
        define-append LIBS $libs
    }
    define-append EXTRA_CFLAGS $cflags
    define-append EXTRA_LDFLAGS $tclconfig(TCL_LD_FLAGS)
    define FOSSIL_ENABLE_TCL
}

# Helper for OpenSSL checking
proc check-for-openssl {msg {cflags {}}} {
    msg-checking "Checking for $msg..."
    set rc 0
    msg-quiet cc-with [list -cflags $cflags -libs {-lssl -lcrypto}] {
        if {[cc-check-includes openssl/ssl.h] && [cc-check-functions SSL_new]} {
            incr rc
        }
    }
    if {$rc} {
        msg-result "ok"
        return 1
    } else {
        msg-result "no"
        return 0
    }
}

set ssldirs [opt-val with-openssl]
if {$ssldirs ne "none"} {
    set found 0
    if {$ssldirs in {auto ""}} {
        catch {
            set cflags [exec pkg-config openssl --cflags-only-I]
            set ldflags [exec pkg-config openssl --libs-only-L]

            set found [check-for-openssl "ssl via pkg-config" "$cflags $ldflags"]
        } msg
        if {!$found} {
            set ssldirs "{} /usr/sfw /usr/local/ssl /usr/lib/ssl /usr/ssl /usr/pkg /usr/local /usr"
        }
    }
    if {!$found} {
        foreach dir $ssldirs {
            if {$dir eq ""} {
                set msg "system ssl"
                set cflags ""
                set ldflags ""
            } else {
                set msg "ssl in $dir"
                set cflags "-I$dir/include"
                set ldflags "-L$dir/lib"
            }
            if {[check-for-openssl $msg "$cflags $ldflags"]} {
                incr found
                break
            }
        }
    }
    if {$found} {
        define FOSSIL_ENABLE_SSL
        define-append EXTRA_CFLAGS $cflags
        define-append EXTRA_LDFLAGS $ldflags
        define-append LIBS -lssl -lcrypto
        msg-result "HTTPS support enabled"

        # Silence OpenSSL deprecation warnings on Mac OS X 10.7.
        if {[string match *-darwin* [get-define host]]} {
            if {[cctest -cflags {-Wdeprecated-declarations}]} {
                define-append EXTRA_CFLAGS -Wdeprecated-declarations
            }
        }
    } else {
        user-error "OpenSSL not found. Consider --with-openssl=none to disable HTTPS support"
    }
}

if {[opt-bool with-miniz]} {
  define FOSSIL_ENABLE_MINIZ 1
  msg-result "Using miniz for compression"
} else {
  # Check for zlib, using the given location if specified
  set zlibpath [opt-val with-zlib]
  if {$zlibpath ne ""} {
      cc-with [list -cflags "-I$zlibpath -L$zlibpath"]
      define-append EXTRA_CFLAGS -I$zlibpath
      define-append EXTRA_LDFLAGS -L$zlibpath
      msg-result "Using zlib from $zlibpath"
  }
  if {![cc-check-includes zlib.h] || ![cc-check-function-in-lib inflateEnd z]} {
      user-error "zlib not found please install it or specify the location with --with-zlib"
  }
}

# Network functions require libraries on some systems
cc-check-function-in-lib gethostbyname nsl
if {![cc-check-function-in-lib socket {socket network}]} {
    # Last resort, may be Windows
    if {[string match *mingw* [get-define host]]} {
        define-append LIBS -lwsock32
    }
}
cc-check-function-in-lib iconv iconv
cc-check-functions utime
cc-check-functions usleep
cc-check-functions strchrnul

New version (lines 361-457):
        define FOSSIL_ENABLE_TCL_STUBS
        define USE_TCL_STUBS
    } else {
        set libs "$tclconfig(TCL_LIB_SPEC) $tclconfig(TCL_LIBS)"
    }
    set cflags $tclconfig(TCL_INCLUDE_SPEC)
    if {!$tclprivatestubs} {
        set foundtcl 0; # Did we find a working Tcl library?
        cc-with [list -cflags $cflags -libs $libs] {
            if {$tclstubs} {
                if {[cc-check-functions Tcl_InitStubs]} {
                    set foundtcl 1
                }
            } else {
                if {[cc-check-functions Tcl_CreateInterp]} {
                    set foundtcl 1
                }
            }
        }
        if {!$foundtcl && [string match *-lieee* $libs]} {
            # On some systems, using "-lieee" from TCL_LIB_SPEC appears
            # to cause issues.
            msg-result "Removing \"-lieee\" and retrying for Tcl..."
            set libs [string map [list -lieee ""] $libs]
            cc-with [list -cflags $cflags -libs $libs] {
                if {$tclstubs} {
                    if {[cc-check-functions Tcl_InitStubs]} {
                        set foundtcl 1
                    }
                } else {
                    if {[cc-check-functions Tcl_CreateInterp]} {
                        set foundtcl 1
                    }
                }
            }
        }
        if {!$foundtcl && ![string match *-lpthread* $libs]} {
            # On some systems, TCL_LIB_SPEC appears to be missing
            # "-lpthread".  Try adding it.
            msg-result "Adding \"-lpthread\" and retrying for Tcl..."
            set libs "$libs -lpthread"
            cc-with [list -cflags $cflags -libs $libs] {
                if {$tclstubs} {
                    if {[cc-check-functions Tcl_InitStubs]} {
                        set foundtcl 1
                    }
                } else {
                    if {[cc-check-functions Tcl_CreateInterp]} {
                        set foundtcl 1
                    }
                }
            }
        }
        if {!$foundtcl} {
            if {$tclstubs} {
                user-error "Cannot find a usable Tcl stubs library $msg"
            } else {
                user-error "Cannot find a usable Tcl library $msg"
            }
        }
    }
    set version $tclconfig(TCL_VERSION)$tclconfig(TCL_PATCH_LEVEL)
    msg-result "Found Tcl $version at $tclconfig(TCL_PREFIX)"
    if {!$tclprivatestubs} {
        define-append LIBS $libs
    }
    define-append EXTRA_CFLAGS $cflags
    if {[info exists zlibpath] && $zlibpath eq "tree"} {
      #
      # NOTE: When using zlib in the source tree, prevent Tcl from
      #       pulling in the system one.
      #
      set tclconfig(TCL_LD_FLAGS) [string map [list -lz ""] \
          $tclconfig(TCL_LD_FLAGS)]
    }
    #
    # NOTE: Remove "-ldl" from the TCL_LD_FLAGS because it will be
    #       be checked for near the bottom of this file.
    #
    set tclconfig(TCL_LD_FLAGS) [string map [list -ldl ""] \
        $tclconfig(TCL_LD_FLAGS)]
    define-append EXTRA_LDFLAGS $tclconfig(TCL_LD_FLAGS)
    define FOSSIL_ENABLE_TCL
}

# Network functions require libraries on some systems
cc-check-function-in-lib gethostbyname nsl
if {![cc-check-function-in-lib socket {socket network}]} {
    # Last resort, may be Windows
    if {[is_mingw]} {
        define-append LIBS -lwsock32
    }
}
cc-check-function-in-lib iconv iconv
cc-check-functions utime
cc-check-functions usleep
cc-check-functions strchrnul
Changes to autosetup/autosetup.
Old version (lines 814-828):
	return $pwd
}

# Follow symlinks until we get to something which is not a symlink
proc realpath {path} {
	while {1} {
		if {[catch {
			set path [file link $path]
		}]} {
			# Not a link
			break
		}
	}
	return $path
}

New version (lines 814-828):
	return $pwd
}

# Follow symlinks until we get to something which is not a symlink
proc realpath {path} {
	while {1} {
		if {[catch {
			set path [file readlink $path]
		}]} {
			# Not a link
			break
		}
	}
	return $path
}
Old version (lines 1186-1201):
    exit 0
}

# If not already paged and stdout is a tty, pipe the output through the pager
# This is done by reinvoking autosetup with --nopager added
proc use_pager {} {
    if {![opt-bool nopager] && [getenv PAGER ""] ne "" && [isatty? stdin] && [isatty? stdout]} {
        catch {
            exec [info nameofexecutable] $::argv0 --nopager {*}$::argv |& [getenv PAGER] >@stdout <@stdin






        }
        exit 0
    }
}

# Outputs the autosetup references in one of several formats
proc autosetup_reference {{type text}} {

New version (lines 1186-1207):
    exit 0
}

# If not already paged and stdout is a tty, pipe the output through the pager
# This is done by reinvoking autosetup with --nopager added
proc use_pager {} {
    if {![opt-bool nopager] && [getenv PAGER ""] ne "" && [isatty? stdin] && [isatty? stdout]} {
        if {[catch {
            exec [info nameofexecutable] $::argv0 --nopager {*}$::argv |& {*}[getenv PAGER] >@stdout <@stdin 2>@stderr
        } msg opts] == 1} {
            if {[dict get $opts -errorcode] eq "NONE"} {
                # an internal/exec error
                puts stderr $msg
                exit 1
            }
        }
        exit 0
    }
}

# Outputs the autosetup references in one of several formats
proc autosetup_reference {{type text}} {
Changes to autosetup/system.tcl.
Old version (lines 105-121):
# Reads the input file <srcdir>/$template and writes the output file $outfile.
# If $outfile is blank/omitted, $template should end with ".in" which
# is removed to create the output file name.
#
# Each pattern of the form @define@ is replaced the the corresponding
# define, if it exists, or left unchanged if not.
# 
# The special value @srcdir@ is subsituted with the relative
# path to the source directory from the directory where the output
# file is created. Use @top_srcdir@ for the absolute path.

#
# Conditional sections may be specified as follows:
## @if name == value
## lines
## @else
## lines
## @endif

New version (lines 105-122):
# Reads the input file <srcdir>/$template and writes the output file $outfile.
# If $outfile is blank/omitted, $template should end with ".in" which
# is removed to create the output file name.
#
# Each pattern of the form @define@ is replaced the the corresponding
# define, if it exists, or left unchanged if not.
# 
# The special value @srcdir@ is substituted with the relative
# path to the source directory from the directory where the output
# file is created, while the special value @top_srcdir@ is substituted
# with the relative path to the top level source directory.
#
# Conditional sections may be specified as follows:
## @if name == value
## lines
## @else
## lines
## @endif
Old version (lines 149-164):
	}

	set outdir [file dirname $out]

	# Make sure the directory exists
	file mkdir $outdir

	# Set up srcdir to be relative to the target dir
	define srcdir [relative-path [file join $::autosetup(srcdir) $outdir] $outdir]


	set mapping {}
	foreach {n v} [array get ::define] {
		lappend mapping @$n@ $v
	}
	set result {}
	foreach line [split [readfile $infile] \n] {


New version (lines 150-166):
	}

	set outdir [file dirname $out]

	# Make sure the directory exists
	file mkdir $outdir

	# Set up srcdir and top_srcdir to be relative to the target dir
	define srcdir [relative-path [file join $::autosetup(srcdir) $outdir] $outdir]
	define top_srcdir [relative-path $::autosetup(srcdir) $outdir]

	set mapping {}
	foreach {n v} [array get ::define] {
		lappend mapping @$n@ $v
	}
	set result {}
	foreach line [split [readfile $infile] \n] {
Changes to fossil.1.
Old version (lines 12-26):
\fICOMMAND [OPTIONS]\fR
.SH DESCRIPTION
Fossil is a distributed version control system (DVCS) with built-in
wiki, ticket tracker, CGI/http interface, and http server.

.SH Common COMMANDs:

add            clean          import         pull           stash 
.br
addremove      clone          info           purge          status
.br
all            commit         init           push           sync
.br
annotate       diff           json           rebuild        tag
.br

New version (lines 12-26):
\fICOMMAND [OPTIONS]\fR
.SH DESCRIPTION
Fossil is a distributed version control system (DVCS) with built-in
wiki, ticket tracker, CGI/http interface, and http server.

.SH Common COMMANDs:

add            clean          import         pull           stash
.br
addremove      clone          info           purge          status
.br
all            commit         init           push           sync
.br
annotate       diff           json           rebuild        tag
.br
Changes to setup/fossil.nsi.
Old version (lines 10-24):
Name "Fossil"

; The file to write
OutFile "fossil-setup.exe"

; The default installation directory
InstallDir $PROGRAMFILES\Fossil
; Registry key to check for directory (so if you install again, it will 
; overwrite the old one automatically)
InstallDirRegKey HKLM SOFTWARE\Fossil "Install_Dir"

; The text to prompt the user to enter a directory
ComponentText "This will install fossil on your computer."
; The text to prompt the user to enter a directory
DirText "Choose a directory to install in to:"

New version (lines 10-24):
Name "Fossil"

; The file to write
OutFile "fossil-setup.exe"

; The default installation directory
InstallDir $PROGRAMFILES\Fossil
; Registry key to check for directory (so if you install again, it will
; overwrite the old one automatically)
InstallDirRegKey HKLM SOFTWARE\Fossil "Install_Dir"

; The text to prompt the user to enter a directory
ComponentText "This will install fossil on your computer."
; The text to prompt the user to enter a directory
DirText "Choose a directory to install in to:"
Changes to skins/README.md.
Old version (lines 27-41):
   5.   Type "make" to rebuild.

Development Hints
-----------------

One way to develop a new skin is to copy the baseline files (css.txt,
details.txt, footer.txt, and header.txt) into a working directory $WORKDIR 
then launch Fossil with a command-line option "--skin $WORKDIR".  Example:

        cp -r skins/default newskin
        fossil ui --skin ./newskin

When the argument to --skin contains one or more '/' characters, the
appropriate skin files are read from disk from the directory specified.


New version (lines 27-41):
   5.   Type "make" to rebuild.

Development Hints
-----------------

One way to develop a new skin is to copy the baseline files (css.txt,
details.txt, footer.txt, and header.txt) into a working directory $WORKDIR
then launch Fossil with a command-line option "--skin $WORKDIR".  Example:

        cp -r skins/default newskin
        fossil ui --skin ./newskin

When the argument to --skin contains one or more '/' characters, the
appropriate skin files are read from disk from the directory specified.
Changes to skins/black_and_white/footer.txt.
Old version (lines 1-4):
<div class="footer">
Fossil version $manifest_version $manifest_date
</div>
</body></html>

New version (lines 1-4):
<div class="footer">
Fossil $release_version $manifest_version $manifest_date
</div>
</body></html>
Changes to skins/blitz/css.txt.
Old version (lines 1060-1074):
  border-right: 1px solid #ddd;
}

tr.timelineSelected {
  border-left: 2px solid orange;
  background-color: #ffffe8;
  border-bottom: 1px solid #ddd;
  border-right: 1px solid #ddd;  
}

tr.timelineCurrent td.timelineTableCell {
}

tr.timelineSpacer {
}

New version (lines 1060-1074):
  border-right: 1px solid #ddd;
}

tr.timelineSelected {
  border-left: 2px solid orange;
  background-color: #ffffe8;
  border-bottom: 1px solid #ddd;
  border-right: 1px solid #ddd;
}

tr.timelineCurrent td.timelineTableCell {
}

tr.timelineSpacer {
}
Changes to skins/blitz/footer.txt.
Old version (lines 1-12):
      </div> <!-- end div container -->
    </div> <!-- end div middle max-full-width -->
    <div class="footer">
      <div class="container">
        <div class="pull-right">
          <a href="http://fossil-scm.org">Fossil version $manifest_version $manifest_date</a>
        </div>
        This page was generated in about <th1>puts [expr {([utime]+[stime]+1000)/1000*0.001}]</th1>s
      </div>
    </div>
  </body>
</html>

New version (lines 1-12):
      </div> <!-- end div container -->
    </div> <!-- end div middle max-full-width -->
    <div class="footer">
      <div class="container">
        <div class="pull-right">
          <a href="http://fossil-scm.org">Fossil $release_version $manifest_version $manifest_date</a>
        </div>
        This page was generated in about <th1>puts [expr {([utime]+[stime]+1000)/1000*0.001}]</th1>s
      </div>
    </div>
  </body>
</html>
Changes to skins/blitz/ticket.txt.
Old version (lines 83-97):
              username AS xusername
         FROM ticketchng
        WHERE tkt_id=$tkt_id AND length(icomment)>0} {
          if {$seenRow eq "0"} {
            html "<h5>User Comments</h5>\n"
            set seenRow 1
          }
  html "<div class='tktComment'>\n"          
  html "<div class='tktCommentHeader'>\n"
  html "<div class='pull-right'>$xdate</div>\n"
  html "<span class='tktCommentLogin'>[htmlize $xlogin]</span>"
  if {$xlogin ne $xusername && [string length $xusername]>0} {
    html " (claiming to be <span class='tktCommentLogin'>[htmlize $xusername]</span>)"
  }
  html " commented</div>\n"

New version (lines 83-97):
              username AS xusername
         FROM ticketchng
        WHERE tkt_id=$tkt_id AND length(icomment)>0} {
          if {$seenRow eq "0"} {
            html "<h5>User Comments</h5>\n"
            set seenRow 1
          }
  html "<div class='tktComment'>\n"
  html "<div class='tktCommentHeader'>\n"
  html "<div class='pull-right'>$xdate</div>\n"
  html "<span class='tktCommentLogin'>[htmlize $xlogin]</span>"
  if {$xlogin ne $xusername && [string length $xusername]>0} {
    html " (claiming to be <span class='tktCommentLogin'>[htmlize $xusername]</span>)"
  }
  html " commented</div>\n"
Changes to skins/blitz_no_logo/footer.txt.
Old version (lines 1-12):
      </div> <!-- end div container -->
    </div> <!-- end div middle max-full-width -->
    <div class="footer">
      <div class="container">
        <div class="pull-right">
          <a href="http://fossil-scm.org">Fossil version $manifest_version $manifest_date</a>
        </div>
        This page was generated in about <th1>puts [expr {([utime]+[stime]+1000)/1000*0.001}]</th1>s
      </div>
    </div>
  </body>
</html>

New version (lines 1-12):
      </div> <!-- end div container -->
    </div> <!-- end div middle max-full-width -->
    <div class="footer">
      <div class="container">
        <div class="pull-right">
          <a href="http://fossil-scm.org">Fossil $release_version $manifest_version $manifest_date</a>
        </div>
        This page was generated in about <th1>puts [expr {([utime]+[stime]+1000)/1000*0.001}]</th1>s
      </div>
    </div>
  </body>
</html>
Changes to skins/blitz_no_logo/ticket.txt.
Old version (lines 83-97):
              username AS xusername
         FROM ticketchng
        WHERE tkt_id=$tkt_id AND length(icomment)>0} {
          if {$seenRow eq "0"} {
            html "<h5>User Comments</h5>\n"
            set seenRow 1
          }
  html "<div class='tktComment'>\n"          
  html "<div class='tktCommentHeader'>\n"
  html "<div class='pull-right'>$xdate</div>\n"
  html "<span class='tktCommentLogin'>[htmlize $xlogin]</span>"
  if {$xlogin ne $xusername && [string length $xusername]>0} {
    html " (claiming to be <span class='tktCommentLogin'>[htmlize $xusername]</span>)"
  }
  html " commented</div>\n"

New version (lines 83-97):
              username AS xusername
         FROM ticketchng
        WHERE tkt_id=$tkt_id AND length(icomment)>0} {
          if {$seenRow eq "0"} {
            html "<h5>User Comments</h5>\n"
            set seenRow 1
          }
  html "<div class='tktComment'>\n"
  html "<div class='tktCommentHeader'>\n"
  html "<div class='pull-right'>$xdate</div>\n"
  html "<span class='tktCommentLogin'>[htmlize $xlogin]</span>"
  if {$xlogin ne $xusername && [string length $xusername]>0} {
    html " (claiming to be <span class='tktCommentLogin'>[htmlize $xusername]</span>)"
  }
  html " commented</div>\n"
Changes to skins/default/footer.txt.
Old version (lines 1-6):
<div class="footer">
This page was generated in about
<th1>puts [expr {([utime]+[stime]+1000)/1000*0.001}]</th1>s by
Fossil version $manifest_version $manifest_date
</div>
</body></html>

New version (lines 1-6):
<div class="footer">
This page was generated in about
<th1>puts [expr {([utime]+[stime]+1000)/1000*0.001}]</th1>s by
Fossil $release_version $manifest_version $manifest_date
</div>
</body></html>
Changes to skins/khaki/footer.txt.
Old version (lines 1-4):
<div class="footer">
Fossil version $manifest_version $manifest_date
</div>
</body></html>

New version (lines 1-4):
<div class="footer">
Fossil $release_version $manifest_version $manifest_date
</div>
</body></html>
Changes to skins/original/footer.txt.
Old version (lines 1-6):
<div class="footer">
This page was generated in about
<th1>puts [expr {([utime]+[stime]+1000)/1000*0.001}]</th1>s by
Fossil version $manifest_version $manifest_date
</div>
</body></html>

New version (lines 1-6):
<div class="footer">
This page was generated in about
<th1>puts [expr {([utime]+[stime]+1000)/1000*0.001}]</th1>s by
Fossil $release_version $manifest_version $manifest_date
</div>
</body></html>
Changes to skins/plain_gray/footer.txt.
Old version (lines 1-4):
<div class="footer">
Fossil version $manifest_version $manifest_date
</div>
</body></html>

New version (lines 1-4):
<div class="footer">
Fossil $release_version $manifest_version $manifest_date
</div>
</body></html>
Changes to skins/rounded1/footer.txt.
Old version (lines 1-4):
<div class="footer">
Fossil version $manifest_version $manifest_date
</div>
</body></html>

New version (lines 1-4):
<div class="footer">
Fossil $release_version $manifest_version $manifest_date
</div>
</body></html>
Changes to skins/xekri/css.txt.
Old version (lines 1-14):
/******************************************************************************
 * Xekri
 *
 * To adjust the width of the contents for this skin, look for the "max-width" 
 * property and change its value.  (It's in the "Main Area" section)  The value 
 * determines how much of the browser window to use.  Some like 100%, so that 
 * the entire window is used.  Others prefer 80%, which makes the contents 
 * easier to read for them.
 */


/**************************************
 * General HTML
 */

New version (lines 1-14):
/******************************************************************************
 * Xekri
 *
 * To adjust the width of the contents for this skin, look for the "max-width"
 * property and change its value.  (It's in the "Main Area" section)  The value
 * determines how much of the browser window to use.  Some like 100%, so that
 * the entire window is used.  Others prefer 80%, which makes the contents
 * easier to read for them.
 */


/**************************************
 * General HTML
 */
Old version (lines 929-952):
span.wikiruleHead {
  font-weight: bold;
}


/* format for user color input on checkin edit page */
input.checkinUserColor {
  /* no special definitions, class defined, to enable color pickers, 
  * f.e.:
  * ** add the color picker found at http:jscolor.com as java script 
  * include
  * ** to the header and configure the java script file with
  * ** 1. use as bindClass :checkinUserColor
  * ** 2. change the default hash adding behaviour to ON
  * ** or change the class defition of element identified by 
  * id="clrcust"
  * ** to a standard jscolor definition with java script in the footer. 
  * */
}

/* format for end of content area, to be used to clear page flow. */
div.endContent {
  clear: both;
}






New version (lines 929-952):
span.wikiruleHead {
  font-weight: bold;
}


/* format for user color input on checkin edit page */
input.checkinUserColor {
  /* no special definitions, class defined, to enable color pickers,
  * f.e.:
  * ** add the color picker found at http:jscolor.com as java script
  * include
  * ** to the header and configure the java script file with
  * ** 1. use as bindClass :checkinUserColor
  * ** 2. change the default hash adding behaviour to ON
  * ** or change the class defition of element identified by
  * id="clrcust"
  * ** to a standard jscolor definition with java script in the footer.
  * */
}

/* format for end of content area, to be used to clear page flow. */
div.endContent {
  clear: both;
}
Changes to src/add.c.
Old version (lines 166-180):
  if( !file_is_simple_pathname(zPath, 1) ){
    fossil_warning("filename contains illegal characters: %s", zPath);
    return 0;
  }
  if( db_exists("SELECT 1 FROM vfile"
                " WHERE pathname=%Q %s", zPath, filename_collation()) ){
    db_multi_exec("UPDATE vfile SET deleted=0"
                  " WHERE pathname=%Q %s", zPath, filename_collation());

  }else{
    char *zFullname = mprintf("%s%s", g.zLocalRoot, zPath);
    int isExe = file_wd_isexe(zFullname);
    db_multi_exec(
      "INSERT INTO vfile(vid,deleted,rid,mrid,pathname,isexe,islink)"
      "VALUES(%d,0,0,0,%Q,%d,%d)",
      vid, zPath, isExe, file_wd_islink(0));






New version (lines 166-181):
  if( !file_is_simple_pathname(zPath, 1) ){
    fossil_warning("filename contains illegal characters: %s", zPath);
    return 0;
  }
  if( db_exists("SELECT 1 FROM vfile"
                " WHERE pathname=%Q %s", zPath, filename_collation()) ){
    db_multi_exec("UPDATE vfile SET deleted=0"
                  " WHERE pathname=%Q %s AND deleted",
                  zPath, filename_collation());
  }else{
    char *zFullname = mprintf("%s%s", g.zLocalRoot, zPath);
    int isExe = file_wd_isexe(zFullname);
    db_multi_exec(
      "INSERT INTO vfile(vid,deleted,rid,mrid,pathname,isexe,islink)"
      "VALUES(%d,0,0,0,%Q,%d,%d)",
      vid, zPath, isExe, file_wd_islink(0));
Old version (lines 711-726):
  const char *zNew,
  int dryRunFlag
){
  int x = db_int(-1, "SELECT deleted FROM vfile WHERE pathname=%Q %s",
                         zNew, filename_collation());
  if( x>=0 ){
    if( x==0 ){



      fossil_fatal("cannot rename '%s' to '%s' since another file named '%s'"
                   " is currently under management", zOrig, zNew, zNew);

    }else{
      fossil_fatal("cannot rename '%s' to '%s' since the delete of '%s' has "
                   "not yet been committed", zOrig, zNew, zNew);
    }
  }
  fossil_print("RENAME %s %s\n", zOrig, zNew);
  if( !dryRunFlag ){






New version (lines 712-731):
  const char *zNew,
  int dryRunFlag
){
  int x = db_int(-1, "SELECT deleted FROM vfile WHERE pathname=%Q %s",
                         zNew, filename_collation());
  if( x>=0 ){
    if( x==0 ){
      if( !filenames_are_case_sensitive() && fossil_stricmp(zOrig,zNew)==0 ){
        /* Case change only */
      }else{
        fossil_fatal("cannot rename '%s' to '%s' since another file named '%s'"
                     " is currently under management", zOrig, zNew, zNew);
      }
    }else{
      fossil_fatal("cannot rename '%s' to '%s' since the delete of '%s' has "
                   "not yet been committed", zOrig, zNew, zNew);
    }
  }
  fossil_print("RENAME %s %s\n", zOrig, zNew);
  if( !dryRunFlag ){
Old version (lines 741-763):
static void add_file_to_move(
  const char *zOldName, /* The old name of the file on disk. */
  const char *zNewName  /* The new name of the file on disk. */
){
  static int tableCreated = 0;
  Blob fullOldName;
  Blob fullNewName;

  if( !tableCreated ){
    db_multi_exec("CREATE TEMP TABLE fmove(x TEXT PRIMARY KEY %s, y TEXT %s)",
                  filename_collation(), filename_collation());
    tableCreated = 1;
  }
  file_tree_name(zOldName, &fullOldName, 1, 1);

  file_tree_name(zNewName, &fullNewName, 1, 1);


  db_multi_exec("INSERT INTO fmove VALUES('%q','%q');",
                blob_str(&fullOldName), blob_str(&fullNewName));

  blob_reset(&fullNewName);
  blob_reset(&fullOldName);
}

/*
** This function moves files within the checkout, using the file names
** contained in the temporary table "fmove".  The temporary table is






New version (lines 746-772):
static void add_file_to_move(
  const char *zOldName, /* The old name of the file on disk. */
  const char *zNewName  /* The new name of the file on disk. */
){
  static int tableCreated = 0;
  Blob fullOldName;
  Blob fullNewName;
  char *zOld, *zNew;
  if( !tableCreated ){
    db_multi_exec("CREATE TEMP TABLE fmove(x TEXT PRIMARY KEY %s, y TEXT %s)",
                  filename_collation(), filename_collation());
    tableCreated = 1;
  }
  file_tree_name(zOldName, &fullOldName, 1, 1);
  zOld = blob_str(&fullOldName);
  file_tree_name(zNewName, &fullNewName, 1, 1);
  zNew = blob_str(&fullNewName);
  if( filenames_are_case_sensitive() || fossil_stricmp(zOld,zNew)!=0 ){
    db_multi_exec("INSERT INTO fmove VALUES('%q','%q');", zOld, zNew);

  }
  blob_reset(&fullNewName);
  blob_reset(&fullOldName);
}

/*
** This function moves files within the checkout, using the file names
** contained in the temporary table "fmove".  The temporary table is
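Both rename paths above apply the same guard: on a case-insensitive filesystem, a new name that differs from the old one only in letter case is not treated as a collision, and such a pair is not queued into the fmove table. A minimal sketch of that guard, using only the helpers that appear in the diff (filenames_are_case_sensitive() and fossil_stricmp()); it is an illustration, not code from the check-in:

/* Sketch: is zOld -> zNew only a change of letter case?
** Illustrative only; the helpers are the ones shown in the diff above. */
static int is_case_change_only(const char *zOld, const char *zNew){
  return !filenames_are_case_sensitive()   /* e.g. Windows or default macOS */
      && fossil_stricmp(zOld, zNew)==0;    /* names equal when case is ignored */
}

The rename check treats such a pair as legal (only the stored case changes), while add_file_to_move() skips it so that the later filesystem move does not try to rename a file onto itself.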
Changes to src/allrepo.c.
Old version (lines 100-113):
**                caution should be exercised with this command because its
**                effects cannot be undone.  Use of the --dry-run option to
**                carefully review the local checkouts to be operated upon
**                and the --whatif option to carefully review the files to
**                be deleted beforehand is highly recommended.  The command
**                line options supported by the clean command itself, if any
**                are present, are passed along verbatim.


**
**    dbstat      Run the "dbstat" command on all repositories.
**
**    extras      Shows "extra" files from all local checkouts.  The command
**                line options supported by the extra command itself, if any
**                are present, are passed along verbatim.
**






New version (lines 100-115):
**                caution should be exercised with this command because its
**                effects cannot be undone.  Use of the --dry-run option to
**                carefully review the local checkouts to be operated upon
**                and the --whatif option to carefully review the files to
**                be deleted beforehand is highly recommended.  The command
**                line options supported by the clean command itself, if any
**                are present, are passed along verbatim.
**
**    config      Only the "config pull AREA" command works.
**
**    dbstat      Run the "dbstat" command on all repositories.
**
**    extras      Shows "extra" files from all local checkouts.  The command
**                line options supported by the extra command itself, if any
**                are present, are passed along verbatim.
**
Old version (lines 181-195):
    dryRunFlag = find_option("test",0,0)!=0; /* deprecated */
  }

  if( g.argc<3 ){
    usage("SUBCOMMAND ...");
  }
  n = strlen(g.argv[2]);
  db_open_config(1);
  blob_zero(&extra);
  zCmd = g.argv[2];
  if( !login_is_nobody() ) blob_appendf(&extra, " -U %s", g.zLogin);
  if( strncmp(zCmd, "list", n)==0 || strncmp(zCmd,"ls",n)==0 ){
    zCmd = "list";
    useCheckouts = find_option("ckout","c",0)!=0;
  }else if( strncmp(zCmd, "clean", n)==0 ){






New version (lines 183-197):
    dryRunFlag = find_option("test",0,0)!=0; /* deprecated */
  }

  if( g.argc<3 ){
    usage("SUBCOMMAND ...");
  }
  n = strlen(g.argv[2]);
  db_open_config(1, 0);
  blob_zero(&extra);
  zCmd = g.argv[2];
  if( !login_is_nobody() ) blob_appendf(&extra, " -U %s", g.zLogin);
  if( strncmp(zCmd, "list", n)==0 || strncmp(zCmd,"ls",n)==0 ){
    zCmd = "list";
    useCheckouts = find_option("ckout","c",0)!=0;
  }else if( strncmp(zCmd, "clean", n)==0 ){
Old version (lines 205-218):
    collect_argument_value(&extra, "ignore");
    collect_argument_value(&extra, "keep");
    collect_argument(&extra, "no-prompt",0);
    collect_argument(&extra, "temp",0);
    collect_argument(&extra, "verbose","v");
    collect_argument(&extra, "whatif",0);
    useCheckouts = 1;









  }else if( strncmp(zCmd, "dbstat", n)==0 ){
    zCmd = "dbstat --omit-version-info -R";
    showLabel = 1;
    quiet = 1;
    collect_argument(&extra, "brief", "b");
    collect_argument(&extra, "db-check", 0);
  }else if( strncmp(zCmd, "extras", n)==0 ){






New version (lines 207-229):
    collect_argument_value(&extra, "ignore");
    collect_argument_value(&extra, "keep");
    collect_argument(&extra, "no-prompt",0);
    collect_argument(&extra, "temp",0);
    collect_argument(&extra, "verbose","v");
    collect_argument(&extra, "whatif",0);
    useCheckouts = 1;
  }else if( strncmp(zCmd, "config", n)==0 ){
    zCmd = "config -R";
    collect_argv(&extra, 3);
    (void)find_option("legacy",0,0);
    (void)find_option("overwrite",0,0);
    verify_all_options();
    if( g.argc!=5 || fossil_strcmp(g.argv[3],"pull")!=0 ){
      usage("configure pull AREA ?OPTIONS?");
    }
  }else if( strncmp(zCmd, "dbstat", n)==0 ){
    zCmd = "dbstat --omit-version-info -R";
    showLabel = 1;
    quiet = 1;
    collect_argument(&extra, "brief", "b");
    collect_argument(&extra, "db-check", 0);
  }else if( strncmp(zCmd, "extras", n)==0 ){
Old version (lines 279-293):
    int j;
    Blob fn = BLOB_INITIALIZER;
    Blob sql = BLOB_INITIALIZER;
    useCheckouts = find_option("ckout","c",0)!=0;
    verify_all_options();
    db_begin_transaction();
    for(j=3; j<g.argc; j++, blob_reset(&sql), blob_reset(&fn)){
      file_canonical_name(g.argv[j], &fn, 0);
      blob_append_sql(&sql,
         "DELETE FROM global_config WHERE name GLOB '%s:%q'",
         useCheckouts?"ckout":"repo", blob_str(&fn)
      );
      if( dryRunFlag ){
        fossil_print("%s\n", blob_sql_text(&sql));
      }else{






New version (lines 290-304):
    int j;
    Blob fn = BLOB_INITIALIZER;
    Blob sql = BLOB_INITIALIZER;
    useCheckouts = find_option("ckout","c",0)!=0;
    verify_all_options();
    db_begin_transaction();
    for(j=3; j<g.argc; j++, blob_reset(&sql), blob_reset(&fn)){
      file_canonical_name(g.argv[j], &fn, useCheckouts?1:0);
      blob_append_sql(&sql,
         "DELETE FROM global_config WHERE name GLOB '%s:%q'",
         useCheckouts?"ckout":"repo", blob_str(&fn)
      );
      if( dryRunFlag ){
        fossil_print("%s\n", blob_sql_text(&sql));
      }else{
Changes to src/attach.c.
Old version (lines 24-119):
/*
** WEBPAGE: attachlist
** List attachments.
**
**    tkt=TICKETUUID
**    page=WIKIPAGE
**
** Either one of tkt= or page= are supplied or neither but not both.
** If neither are given, all attachments are listed.  If one is given,
** only attachments for the designated ticket or wiki page are shown.
** TICKETUUID must be complete


*/
void attachlist_page(void){
  const char *zPage = P("page");
  const char *zTkt = P("tkt");

  Blob sql;
  Stmt q;

  if( zPage && zTkt ) zTkt = 0;
  login_check_credentials();
  blob_zero(&sql);
  blob_append_sql(&sql,
     "SELECT datetime(mtime%s), src, target, filename,"
     "       comment, user,"
     "       (SELECT uuid FROM blob WHERE rid=attachid), attachid"





     "  FROM attachment",
     timeline_utc()
  );
  if( zPage ){
    if( g.perm.RdWiki==0 ){ login_needed(g.anon.RdWiki); return; }
    style_header("Attachments To %h", zPage);
    blob_append_sql(&sql, " WHERE target=%Q", zPage);
  }else if( zTkt ){
    if( g.perm.RdTkt==0 ){ login_needed(g.anon.RdTkt); return; }
    style_header("Attachments To Ticket %S", zTkt);
    blob_append_sql(&sql, " WHERE target GLOB '%q*'", zTkt);





  }else{
    if( g.perm.RdTkt==0 && g.perm.RdWiki==0 ){
      login_needed(g.anon.RdTkt || g.anon.RdWiki);
      return;
    }
    style_header("All Attachments");
  }
  blob_append_sql(&sql, " ORDER BY mtime DESC");
  db_prepare(&q, "%s", blob_sql_text(&sql));
  @ <ol>
  while( db_step(&q)==SQLITE_ROW ){
    const char *zDate = db_column_text(&q, 0);
    const char *zSrc = db_column_text(&q, 1);
    const char *zTarget = db_column_text(&q, 2);
    const char *zFilename = db_column_text(&q, 3);
    const char *zComment = db_column_text(&q, 4);
    const char *zUser = db_column_text(&q, 5);
    const char *zUuid = db_column_text(&q, 6);
    int attachid = db_column_int(&q, 7);


    const char *zDispUser = zUser && zUser[0] ? zUser : "anonymous";
    int i;
    char *zUrlTail;
    for(i=0; zFilename[i]; i++){
      if( zFilename[i]=='/' && zFilename[i+1]!=0 ){
        zFilename = &zFilename[i+1];
        i = -1;
      }
    }
    if( strlen(zTarget)==UUID_SIZE && validate16(zTarget,UUID_SIZE) ){
      zUrlTail = mprintf("tkt=%s&file=%t", zTarget, zFilename);


    }else{
      zUrlTail = mprintf("page=%t&file=%t", zTarget, zFilename);
    }
    @ <li><p>
    @ Attachment %z(href("%R/ainfo/%!S",zUuid))%S(zUuid)</a>
    if( moderation_pending(attachid) ){
      @ <span class="modpending">*** Awaiting Moderator Approval ***</span>
    }
    @ <br><a href="%R/attachview?%s(zUrlTail)">%h(zFilename)</a>
    @ [<a href="%R/attachdownload/%t(zFilename)?%s(zUrlTail)">download</a>]<br />
    if( zComment ) while( fossil_isspace(zComment[0]) ) zComment++;
    if( zComment && zComment[0] ){
      @ %!W(zComment)<br />
    }
    if( zPage==0 && zTkt==0 ){
      if( zSrc==0 || zSrc[0]==0 ){
        zSrc = "Deleted from";
      }else {
        zSrc = "Added to";
      }
      if( strlen(zTarget)==UUID_SIZE && validate16(zTarget, UUID_SIZE) ){
        @ %s(zSrc) ticket <a href="%R/tktview?name=%s(zTarget)">
        @ %S(zTarget)</a>



      }else{
        @ %s(zSrc) wiki page <a href="%R/wiki?name=%t(zTarget)">
        @ %h(zTarget)</a>
      }
    }else{
      if( zSrc==0 || zSrc[0]==0 ){
        @ Deleted






New version (lines 24-138):
/*
** WEBPAGE: attachlist
** List attachments.
**
**    tkt=TICKETUUID
**    page=WIKIPAGE
**
** At most one of technote=, tkt= or page= are supplied. 
** If none is given, all attachments are listed.  If one is given,
** only attachments for the designated technote, ticket or wiki page
** are shown. TECHNOTEUUID and TICKETUUID may be just a prefix of the
** relevant tech note or ticket, in which case all attachments of all
** tech notes or tickets with the prefix will be listed.
*/
void attachlist_page(void){
  const char *zPage = P("page");
  const char *zTkt = P("tkt");
  const char *zTechNote = P("technote");
  Blob sql;
  Stmt q;

  if( zPage && zTkt ) zTkt = 0;
  login_check_credentials();
  blob_zero(&sql);
  blob_append_sql(&sql,
     "SELECT datetime(mtime,toLocal()), src, target, filename,"
     "       comment, user,"
     "       (SELECT uuid FROM blob WHERE rid=attachid), attachid,"
     "       (CASE WHEN 'tkt-'||target IN (SELECT tagname FROM tag)"
     "                  THEN 1"
     "             WHEN 'event-'||target IN (SELECT tagname FROM tag)"
     "                  THEN 2"
     "             ELSE 0 END)"
     "  FROM attachment"

  );
  if( zPage ){
    if( g.perm.RdWiki==0 ){ login_needed(g.anon.RdWiki); return; }
    style_header("Attachments To %h", zPage);
    blob_append_sql(&sql, " WHERE target=%Q", zPage);
  }else if( zTkt ){
    if( g.perm.RdTkt==0 ){ login_needed(g.anon.RdTkt); return; }
    style_header("Attachments To Ticket %S", zTkt);
    blob_append_sql(&sql, " WHERE target GLOB '%q*'", zTkt);
  }else if( zTechNote ){
    if( g.perm.RdWiki==0 ){ login_needed(g.anon.RdWiki); return; }
    style_header("Attachments to Tech Note %S", zTechNote);
    blob_append_sql(&sql, " WHERE target GLOB '%q*'",
                    zTechNote);
  }else{
    if( g.perm.RdTkt==0 && g.perm.RdWiki==0 ){
      login_needed(g.anon.RdTkt || g.anon.RdWiki);
      return;
    }
    style_header("All Attachments");
  }
  blob_append_sql(&sql, " ORDER BY mtime DESC");
  db_prepare(&q, "%s", blob_sql_text(&sql));
  @ <ol>
  while( db_step(&q)==SQLITE_ROW ){
    const char *zDate = db_column_text(&q, 0);
    const char *zSrc = db_column_text(&q, 1);
    const char *zTarget = db_column_text(&q, 2);
    const char *zFilename = db_column_text(&q, 3);
    const char *zComment = db_column_text(&q, 4);
    const char *zUser = db_column_text(&q, 5);
    const char *zUuid = db_column_text(&q, 6);
    int attachid = db_column_int(&q, 7);
    // type 0 is a wiki page, 1 is a ticket, 2 is a tech note
    int type = db_column_int(&q, 8);
    const char *zDispUser = zUser && zUser[0] ? zUser : "anonymous";
    int i;
    char *zUrlTail;
    for(i=0; zFilename[i]; i++){
      if( zFilename[i]=='/' && zFilename[i+1]!=0 ){
        zFilename = &zFilename[i+1];
        i = -1;
      }
    }
    if( type==1 ){
      zUrlTail = mprintf("tkt=%s&file=%t", zTarget, zFilename);
    }else if( type==2 ){
      zUrlTail = mprintf("technote=%s&file=%t", zTarget, zFilename);
    }else{
      zUrlTail = mprintf("page=%t&file=%t", zTarget, zFilename);
    }
    @ <li><p>
    @ Attachment %z(href("%R/ainfo/%!S",zUuid))%S(zUuid)</a>
    if( moderation_pending(attachid) ){
      @ <span class="modpending">*** Awaiting Moderator Approval ***</span>
    }
    @ <br><a href="%R/attachview?%s(zUrlTail)">%h(zFilename)</a>
    @ [<a href="%R/attachdownload/%t(zFilename)?%s(zUrlTail)">download</a>]<br />
    if( zComment ) while( fossil_isspace(zComment[0]) ) zComment++;
    if( zComment && zComment[0] ){
      @ %!W(zComment)<br />
    }
    if( zPage==0 && zTkt==0 && zTechNote==0 ){
      if( zSrc==0 || zSrc[0]==0 ){
        zSrc = "Deleted from";
      }else {
        zSrc = "Added to";
      }
      if( type==1 ){
        @ %s(zSrc) ticket <a href="%R/tktview?name=%s(zTarget)">
        @ %S(zTarget)</a>
      }else if( type==2 ){
        @ %s(zSrc) tech note <a href="%R/technote/%s(zTarget)">
        @ %S(zTarget)</a>
      }else{
        @ %s(zSrc) wiki page <a href="%R/wiki?name=%t(zTarget)">
        @ %h(zTarget)</a>
      }
    }else{
      if( zSrc==0 || zSrc[0]==0 ){
        @ Deleted
Old version (lines 137-171):
** WEBPAGE: attachview
**
** Download or display an attachment.
** Query parameters:
**
**    tkt=TICKETUUID
**    page=WIKIPAGE

**    file=FILENAME
**    attachid=ID
**
*/
void attachview_page(void){
  const char *zPage = P("page");
  const char *zTkt = P("tkt");

  const char *zFile = P("file");
  const char *zTarget = 0;
  int attachid = atoi(PD("attachid","0"));
  char *zUUID;

  if( zPage && zTkt ) zTkt = 0;
  if( zFile==0 ) fossil_redirect_home();
  login_check_credentials();
  if( zPage ){
    if( g.perm.RdWiki==0 ){ login_needed(g.anon.RdWiki); return; }
    zTarget = zPage;
  }else if( zTkt ){
    if( g.perm.RdTkt==0 ){ login_needed(g.anon.RdTkt); return; }
    zTarget = zTkt;



  }else{
    fossil_redirect_home();
  }
  if( attachid>0 ){
    zUUID = db_text(0,
       "SELECT coalesce(src,'x') FROM attachment"
       " WHERE target=%Q AND attachid=%d",






New version (lines 156-194):
** WEBPAGE: attachview
**
** Download or display an attachment.
** Query parameters:
**
**    tkt=TICKETUUID
**    page=WIKIPAGE
**    technote=TECHNOTEUUID
**    file=FILENAME
**    attachid=ID
**
*/
void attachview_page(void){
  const char *zPage = P("page");
  const char *zTkt = P("tkt");
  const char *zTechNote = P("technote");
  const char *zFile = P("file");
  const char *zTarget = 0;
  int attachid = atoi(PD("attachid","0"));
  char *zUUID;


  if( zFile==0 ) fossil_redirect_home();
  login_check_credentials();
  if( zPage ){
    if( g.perm.RdWiki==0 ){ login_needed(g.anon.RdWiki); return; }
    zTarget = zPage;
  }else if( zTkt ){
    if( g.perm.RdTkt==0 ){ login_needed(g.anon.RdTkt); return; }
    zTarget = zTkt;
  }else if( zTechNote ){
    if( g.perm.RdWiki==0 ){ login_needed(g.anon.RdWiki); return; }
    zTarget = zTechNote;
  }else{
    fossil_redirect_home();
  }
  if( attachid>0 ){
    zUUID = db_text(0,
       "SELECT coalesce(src,'x') FROM attachment"
       " WHERE target=%Q AND attachid=%d",
Old version (lines 185-206):
    style_footer();
    return;
  }else if( zUUID[0]=='x' ){
    style_header("Missing");
    @ Attachment has been deleted
    style_footer();
    return;
  }
  g.perm.Read = 1;
  cgi_replace_parameter("name",zUUID);
  if( fossil_strcmp(g.zPath,"attachview")==0 ){
    artifact_page();
  }else{
    cgi_replace_parameter("m", mimetype_from_name(zFile));
    rawartifact_page();

  }
}

/*
** Save an attachment control artifact into the repository
*/
static void attach_put(






New version (lines 208-230):
    style_footer();
    return;
  }else if( zUUID[0]=='x' ){
    style_header("Missing");
    @ Attachment has been deleted
    style_footer();
    return;
  }else{
    g.perm.Read = 1;
    cgi_replace_parameter("name",zUUID);
    if( fossil_strcmp(g.zPath,"attachview")==0 ){
      artifact_page();
    }else{
      cgi_replace_parameter("m", mimetype_from_name(zFile));
      rawartifact_page();
    }
  }
}

/*
** Save an attachment control artifact into the repository
*/
static void attach_put(
Old version (lines 227-269):
/*
** WEBPAGE: attachadd
** Add a new attachment.
**
**    tkt=TICKETUUID
**    page=WIKIPAGE

**    from=URL
**
*/
void attachadd_page(void){
  const char *zPage = P("page");
  const char *zTkt = P("tkt");

  const char *zFrom = P("from");
  const char *aContent = P("f");
  const char *zName = PD("f:filename","unknown");
  const char *zTarget;
  const char *zTargetType;
  int szContent = atoi(PD("f:bytes","0"));
  int goodCaptcha = 1;

  if( P("cancel") ) cgi_redirect(zFrom);




  if( zPage && zTkt ) fossil_redirect_home();

  if( zPage==0 && zTkt==0 ) fossil_redirect_home();
  login_check_credentials();
  if( zPage ){
    if( g.perm.ApndWiki==0 || g.perm.Attach==0 ){
      login_needed(g.anon.ApndWiki && g.anon.Attach);
      return;
    }
    if( !db_exists("SELECT 1 FROM tag WHERE tagname='wiki-%q'", zPage) ){
      fossil_redirect_home();
    }
    zTarget = zPage;
    zTargetType = mprintf("Wiki Page <a href=\"%R/wiki?name=%h\">%h</a>",
                           zPage, zPage);














  }else{
    if( g.perm.ApndTkt==0 || g.perm.Attach==0 ){
      login_needed(g.anon.ApndTkt && g.anon.Attach);
      return;
    }
    if( !db_exists("SELECT 1 FROM tag WHERE tagname='tkt-%q'", zTkt) ){
      zTkt = db_text(0, "SELECT substr(tagname,5) FROM tag"






New version (lines 251-314):
/*
** WEBPAGE: attachadd
** Add a new attachment.
**
**    tkt=TICKETUUID
**    page=WIKIPAGE
**    technote=TECHNOTEUUID
**    from=URL
**
*/
void attachadd_page(void){
  const char *zPage = P("page");
  const char *zTkt = P("tkt");
  const char *zTechNote = P("technote");
  const char *zFrom = P("from");
  const char *aContent = P("f");
  const char *zName = PD("f:filename","unknown");
  const char *zTarget;
  char *zTargetType;
  int szContent = atoi(PD("f:bytes","0"));
  int goodCaptcha = 1;

  if( P("cancel") ) cgi_redirect(zFrom);
  if( (zPage && zTkt)
   || (zPage && zTechNote)
   || (zTkt && zTechNote)
  ){
   fossil_redirect_home();
  }
  if( zPage==0 && zTkt==0 && zTechNote==0) fossil_redirect_home();
  login_check_credentials();
  if( zPage ){
    if( g.perm.ApndWiki==0 || g.perm.Attach==0 ){
      login_needed(g.anon.ApndWiki && g.anon.Attach);
      return;
    }
    if( !db_exists("SELECT 1 FROM tag WHERE tagname='wiki-%q'", zPage) ){
      fossil_redirect_home();
    }
    zTarget = zPage;
    zTargetType = mprintf("Wiki Page <a href=\"%R/wiki?name=%h\">%h</a>",
                           zPage, zPage);
  }else if ( zTechNote ){
    if( g.perm.Write==0 || g.perm.ApndWiki==0 || g.perm.Attach==0 ){
      login_needed(g.anon.Write && g.anon.ApndWiki && g.anon.Attach);
      return;
    }
    if( !db_exists("SELECT 1 FROM tag WHERE tagname='event-%q'", zTechNote) ){
      zTechNote = db_text(0, "SELECT substr(tagname,7) FROM tag"
                             " WHERE tagname GLOB 'event-%q*'", zTechNote);
      if( zTechNote==0) fossil_redirect_home();
    }
    zTarget = zTechNote;
    zTargetType = mprintf("Tech Note <a href=\"%R/technote/%h\">%h</a>",
                           zTechNote, zTechNote);
  
  }else{
    if( g.perm.ApndTkt==0 || g.perm.Attach==0 ){
      login_needed(g.anon.ApndTkt && g.anon.Attach);
      return;
    }
    if( !db_exists("SELECT 1 FROM tag WHERE tagname='tkt-%q'", zTkt) ){
      zTkt = db_text(0, "SELECT substr(tagname,5) FROM tag"
339
340
341
342
343
344
345


346
347
348
349
350
351
352
353
354
355

356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374

375
376
377
378
379
380
381
  @ <div>
  @ File to Attach:
  @ <input type="file" name="f" size="60" /><br />
  @ Description:<br />
  @ <textarea name="comment" cols="80" rows="5" wrap="virtual"></textarea><br />
  if( zTkt ){
    @ <input type="hidden" name="tkt" value="%h(zTkt)" />


  }else{
    @ <input type="hidden" name="page" value="%h(zPage)" />
  }
  @ <input type="hidden" name="from" value="%h(zFrom)" />
  @ <input type="submit" name="ok" value="Add Attachment" />
  @ <input type="submit" name="cancel" value="Cancel" />
  @ </div>
  captcha_generate(0);
  @ </form>
  style_footer();

}

/*
** WEBPAGE: ainfo
** URL: /ainfo?name=ARTIFACTID
**
** Show the details of an attachment artifact.
*/
void ainfo_page(void){
  int rid;                       /* RID for the control artifact */
  int ridSrc;                    /* RID for the attached file */
  char *zDate;                   /* Date attached */
  const char *zUuid;             /* UUID of the control artifact */
  Manifest *pAttach;             /* Parse of the control artifact */
  const char *zTarget;           /* Wiki or ticket attached to */
  const char *zSrc;              /* UUID of the attached file */
  const char *zName;             /* Name of the attached file */
  const char *zDesc;             /* Description of the attached file */
  const char *zWikiName = 0;     /* Wiki page name when attached to Wiki */

  const char *zTktUuid = 0;      /* Ticket ID when attached to a ticket */
  int modPending;                /* True if awaiting moderation */
  const char *zModAction;        /* Moderation action or NULL */
  int isModerator;               /* TRUE if user is the moderator */
  const char *zMime;             /* MIME Type */
  Blob attach;                   /* Content of the attachment */
  int fShowContent = 0;






New version (lines 384-430):
  @ <div>
  @ File to Attach:
  @ <input type="file" name="f" size="60" /><br />
  @ Description:<br />
  @ <textarea name="comment" cols="80" rows="5" wrap="virtual"></textarea><br />
  if( zTkt ){
    @ <input type="hidden" name="tkt" value="%h(zTkt)" />
  }else if( zTechNote ){
    @ <input type="hidden" name="technote" value="%h(zTechNote)" />
  }else{
    @ <input type="hidden" name="page" value="%h(zPage)" />
  }
  @ <input type="hidden" name="from" value="%h(zFrom)" />
  @ <input type="submit" name="ok" value="Add Attachment" />
  @ <input type="submit" name="cancel" value="Cancel" />
  @ </div>
  captcha_generate(0);
  @ </form>
  style_footer();
  fossil_free(zTargetType);
}

/*
** WEBPAGE: ainfo
** URL: /ainfo?name=ARTIFACTID
**
** Show the details of an attachment artifact.
*/
void ainfo_page(void){
  int rid;                       /* RID for the control artifact */
  int ridSrc;                    /* RID for the attached file */
  char *zDate;                   /* Date attached */
  const char *zUuid;             /* UUID of the control artifact */
  Manifest *pAttach;             /* Parse of the control artifact */
  const char *zTarget;           /* Wiki, ticket or tech note attached to */
  const char *zSrc;              /* UUID of the attached file */
  const char *zName;             /* Name of the attached file */
  const char *zDesc;             /* Description of the attached file */
  const char *zWikiName = 0;     /* Wiki page name when attached to Wiki */
  const char *zTNUuid = 0;       /* Tech Note ID when attached to tech note */
  const char *zTktUuid = 0;      /* Ticket ID when attached to a ticket */
  int modPending;                /* True if awaiting moderation */
  const char *zModAction;        /* Moderation action or NULL */
  int isModerator;               /* TRUE if user is the moderator */
  const char *zMime;             /* MIME Type */
  Blob attach;                   /* Content of the attachment */
  int fShowContent = 0;
Old version (lines 421-439):
    }
  }else if( db_exists("SELECT 1 FROM tag WHERE tagname='wiki-%q'",zTarget) ){
    zWikiName = zTarget;
    if( !g.perm.RdWiki ){ login_needed(g.anon.RdWiki); return; }
    if( g.perm.WrWiki ){
      style_submenu_element("Delete","Delete","%R/ainfo/%s?del", zUuid);
    }






  }
  zDate = db_text(0, "SELECT datetime(%.12f)", pAttach->rDate);

  if( P("confirm")
   && ((zTktUuid && g.perm.WrTkt) || (zWikiName && g.perm.WrWiki))


  ){
    int i, n, rid;
    char *zDate;
    Blob manifest;
    Blob cksum;
    const char *zFile = zName;







New version (lines 470-496):
    }
  }else if( db_exists("SELECT 1 FROM tag WHERE tagname='wiki-%q'",zTarget) ){
    zWikiName = zTarget;
    if( !g.perm.RdWiki ){ login_needed(g.anon.RdWiki); return; }
    if( g.perm.WrWiki ){
      style_submenu_element("Delete","Delete","%R/ainfo/%s?del", zUuid);
    }
  }else if( db_exists("SELECT 1 FROM tag WHERE tagname='event-%q'",zTarget) ){
    zTNUuid = zTarget;
    if( !g.perm.RdWiki ){ login_needed(g.anon.RdWiki); return; }
    if( g.perm.Write && g.perm.WrWiki ){
      style_submenu_element("Delete","Delete","%R/ainfo/%s?del", zUuid);
    }
  }
  zDate = db_text(0, "SELECT datetime(%.12f)", pAttach->rDate);

  if( P("confirm")
   && ((zTktUuid && g.perm.WrTkt) || 
       (zWikiName && g.perm.WrWiki) ||
       (zTNUuid && g.perm.Write && g.perm.WrWiki))
  ){
    int i, n, rid;
    char *zDate;
    Blob manifest;
    Blob cksum;
    const char *zFile = zName;

Old version (lines 453-467):
    rid = content_put(&manifest);
    manifest_crosslink(rid, &manifest, MC_NONE);
    db_end_transaction(0);
    @ <p>The attachment below has been deleted.</p>
  }

  if( P("del")
   && ((zTktUuid && g.perm.WrTkt) || (zWikiName && g.perm.WrWiki))


  ){
    form_begin(0, "%R/ainfo/%!S", zUuid);
    @ <p>Confirm you want to delete the attachment shown below.
    @ <input type="submit" name="confirm" value="Confirm">
    @ </form>
  }







New version (lines 510-526):
    rid = content_put(&manifest);
    manifest_crosslink(rid, &manifest, MC_NONE);
    db_end_transaction(0);
    @ <p>The attachment below has been deleted.</p>
  }

  if( P("del")
   && ((zTktUuid && g.perm.WrTkt) ||
       (zWikiName && g.perm.WrWiki) ||
       (zTNUuid && g.perm.Write && g.perm.WrWiki))
  ){
    form_begin(0, "%R/ainfo/%!S", zUuid);
    @ <p>Confirm you want to delete the attachment shown below.
    @ <input type="submit" name="confirm" value="Confirm">
    @ </form>
  }

Old version (lines 501-514):
  if( modPending ){
    @ <span class="modpending">*** Awaiting Moderator Approval ***</span>
  }
  if( zTktUuid ){
    @ <tr><th>Ticket:</th>
    @ <td>%z(href("%R/tktview/%s",zTktUuid))%s(zTktUuid)</a></td></tr>
  }




  if( zWikiName ){
    @ <tr><th>Wiki&nbsp;Page:</th>
    @ <td>%z(href("%R/wiki?name=%t",zWikiName))%h(zWikiName)</a></td></tr>
  }
  @ <tr><th>Date:</th><td>
  hyperlink_to_date(zDate, "</td></tr>");
  @ <tr><th>User:</th><td>






New version (lines 560-577):
  if( modPending ){
    @ <span class="modpending">*** Awaiting Moderator Approval ***</span>
  }
  if( zTktUuid ){
    @ <tr><th>Ticket:</th>
    @ <td>%z(href("%R/tktview/%s",zTktUuid))%s(zTktUuid)</a></td></tr>
  }
  if( zTNUuid ){
    @ <tr><th>Tech Note:</th>
    @ <td>%z(href("%R/technote/%s",zTNUuid))%s(zTNUuid)</a></td></tr>
  }
  if( zWikiName ){
    @ <tr><th>Wiki&nbsp;Page:</th>
    @ <td>%z(href("%R/wiki?name=%t",zWikiName))%h(zWikiName)</a></td></tr>
  }
  @ <tr><th>Date:</th><td>
  hyperlink_to_date(zDate, "</td></tr>");
  @ <tr><th>User:</th><td>
Old version (lines 550-563):
      output_text_with_line_numbers(z, zLn);
    }else{
      @ <pre>
      @ %h(z)
      @ </pre>
    }
  }else if( strncmp(zMime, "image/", 6)==0 ){


    @ <img src="%R/raw/%s(zSrc)?m=%s(zMime)"></img>
    style_submenu_element("Image", "Image", "%R/raw/%s?m=%s", zSrc, zMime);
  }else{
    int sz = db_int(0, "SELECT size FROM blob WHERE rid=%d", ridSrc);
    @ <i>(file is %d(sz) bytes of binary data)</i>
  }
  @ </blockquote>






New version (lines 613-628):
      output_text_with_line_numbers(z, zLn);
    }else{
      @ <pre>
      @ %h(z)
      @ </pre>
    }
  }else if( strncmp(zMime, "image/", 6)==0 ){
    int sz = db_int(0, "SELECT size FROM blob WHERE rid=%d", ridSrc);
    @ <i>(file is %d(sz) bytes of image data)</i><br>
    @ <img src="%R/raw/%s(zSrc)?m=%s(zMime)"></img>
    style_submenu_element("Image", "Image", "%R/raw/%s?m=%s", zSrc, zMime);
  }else{
    int sz = db_int(0, "SELECT size FROM blob WHERE rid=%d", ridSrc);
    @ <i>(file is %d(sz) bytes of binary data)</i>
  }
  @ </blockquote>
Old version (lines 572-591):
void attachment_list(
  const char *zTarget,   /* Object that things are attached to */
  const char *zHeader    /* Header to display with attachments */
){
  int cnt = 0;
  Stmt q;
  db_prepare(&q,
     "SELECT datetime(mtime%s), filename, user,"
     "       (SELECT uuid FROM blob WHERE rid=attachid), src"
     "  FROM attachment"
     " WHERE isLatest AND src!='' AND target=%Q"
     " ORDER BY mtime DESC",
     timeline_utc(), zTarget
  );
  while( db_step(&q)==SQLITE_ROW ){
    const char *zDate = db_column_text(&q, 0);
    const char *zFile = db_column_text(&q, 1);
    const char *zUser = db_column_text(&q, 2);
    const char *zUuid = db_column_text(&q, 3);
    const char *zSrc = db_column_text(&q, 4);






New version (lines 637-656):
void attachment_list(
  const char *zTarget,   /* Object that things are attached to */
  const char *zHeader    /* Header to display with attachments */
){
  int cnt = 0;
  Stmt q;
  db_prepare(&q,
     "SELECT datetime(mtime,toLocal()), filename, user,"
     "       (SELECT uuid FROM blob WHERE rid=attachid), src"
     "  FROM attachment"
     " WHERE isLatest AND src!='' AND target=%Q"
     " ORDER BY mtime DESC",
     zTarget
  );
  while( db_step(&q)==SQLITE_ROW ){
    const char *zDate = db_column_text(&q, 0);
    const char *zFile = db_column_text(&q, 1);
    const char *zUser = db_column_text(&q, 2);
    const char *zUuid = db_column_text(&q, 3);
    const char *zSrc = db_column_text(&q, 4);
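The attachment pages above tell their targets apart by tag-name prefix: a ticket target is recorded in the tag table as 'tkt-'||target, a tech note as 'event-'||target, and anything else is treated as a wiki page. The list query folds that test into a single CASE expression; here is a per-target sketch of the same idea, using the db_exists() helper seen elsewhere in the diff (illustrative, not the check-in's code):

/* Sketch: classify an attachment target the way attachlist does.
** Returns 0 for a wiki page, 1 for a ticket, 2 for a tech note. */
static int attachment_target_type(const char *zTarget){
  if( db_exists("SELECT 1 FROM tag WHERE tagname='tkt-%q'", zTarget) ){
    return 1;    /* ticket */
  }
  if( db_exists("SELECT 1 FROM tag WHERE tagname='event-%q'", zTarget) ){
    return 2;    /* tech note (stored under an "event-" tag) */
  }
  return 0;      /* wiki page */
}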
Changes to src/bisect.c.
Old version (lines 167-226):
  db_multi_exec(
     "REPLACE INTO vvar(name,value) VALUES('bisect-log',"
       "COALESCE((SELECT value||' ' FROM vvar WHERE name='bisect-log'),'')"
       " || '%d')", rid);
}

/*
** Show a chart of bisect "good" and "bad" versions.  The chart can be
** sorted either chronologically by bisect time, or by check-in time.
*/
static void bisect_chart(int sortByCkinTime){
  char *zLog = db_lget("bisect-log","");
  Blob log, id;
  Stmt q;
  int cnt = 0;
  blob_init(&log, zLog, -1);
  db_multi_exec(
     "CREATE TEMP TABLE bilog("
     "  seq INTEGER PRIMARY KEY,"  /* Sequence of events */
     "  stat TEXT,"                /* Type of occurrence */
     "  rid INTEGER"               /* Check-in number */
     ");"
  );
  db_prepare(&q, "INSERT OR IGNORE INTO bilog(seq,stat,rid)"
                 " VALUES(:seq,:stat,:rid)");
  while( blob_token(&log, &id) ){
    int rid = atoi(blob_str(&id));
    db_bind_int(&q, ":seq", ++cnt);
    db_bind_text(&q, ":stat", rid>0 ? "GOOD" : "BAD");
    db_bind_int(&q, ":rid", rid>=0 ? rid : -rid);
    db_step(&q);
    db_reset(&q);
  }

  db_bind_int(&q, ":seq", ++cnt);
  db_bind_text(&q, ":stat", "CURRENT");
  db_bind_int(&q, ":rid", db_lget_int("checkout", 0));
  db_step(&q);

  db_finalize(&q);










  db_prepare(&q,
    "SELECT bilog.seq, bilog.stat,"
    "       substr(blob.uuid,1,16), datetime(event.mtime)"

    "  FROM bilog, blob, event"
    " WHERE blob.rid=bilog.rid AND event.objid=bilog.rid"
    "   AND event.type='ci'"
    " ORDER BY %s bilog.rowid ASC",
    (sortByCkinTime ? "event.mtime DESC, " : "")
  );
  while( db_step(&q)==SQLITE_ROW ){

    fossil_print("%3d %-7s %s %s\n",
        db_column_int(&q, 0),

        db_column_text(&q, 1),
        db_column_text(&q, 3),
        db_column_text(&q, 2));
  }
  db_finalize(&q);
}

/*
** COMMAND: bisect
**






New version (lines 167-241):
  db_multi_exec(
     "REPLACE INTO vvar(name,value) VALUES('bisect-log',"
       "COALESCE((SELECT value||' ' FROM vvar WHERE name='bisect-log'),'')"
       " || '%d')", rid);
}

/*
** Create a TEMP table named "bilog" that contains the complete history
** of the current bisect.
*/
void bisect_create_bilog_table(int iCurrent){
  char *zLog = db_lget("bisect-log","");
  Blob log, id;
  Stmt q;
  int cnt = 0;
  blob_init(&log, zLog, -1);
  db_multi_exec(
     "CREATE TEMP TABLE bilog("
     "  seq INTEGER PRIMARY KEY,"  /* Sequence of events */
     "  stat TEXT,"                /* Type of occurrence */
     "  rid INTEGER UNIQUE"        /* Check-in number */
     ");"
  );
  db_prepare(&q, "INSERT OR IGNORE INTO bilog(seq,stat,rid)"
                 " VALUES(:seq,:stat,:rid)");
  while( blob_token(&log, &id) ){
    int rid = atoi(blob_str(&id));
    db_bind_int(&q, ":seq", ++cnt);
    db_bind_text(&q, ":stat", rid>0 ? "GOOD" : "BAD");
    db_bind_int(&q, ":rid", rid>=0 ? rid : -rid);
    db_step(&q);
    db_reset(&q);
  }
  if( iCurrent>0 ){
    db_bind_int(&q, ":seq", ++cnt);
    db_bind_text(&q, ":stat", "CURRENT");
    db_bind_int(&q, ":rid", iCurrent);
    db_step(&q);
  }
  db_finalize(&q);
}

/*
** Show a chart of bisect "good" and "bad" versions.  The chart can be
** sorted either chronologically by bisect time, or by check-in time.
*/
static void bisect_chart(int sortByCkinTime){
  Stmt q;
  int iCurrent = db_lget_int("checkout",0);
  bisect_create_bilog_table(iCurrent);
  db_prepare(&q,
    "SELECT bilog.seq, bilog.stat,"
    "       substr(blob.uuid,1,16), datetime(event.mtime),"
    "       blob.rid==%d"
    "  FROM bilog, blob, event"
    " WHERE blob.rid=bilog.rid AND event.objid=bilog.rid"
    "   AND event.type='ci'"
    " ORDER BY %s bilog.rowid ASC",
    iCurrent, (sortByCkinTime ? "event.mtime DESC, " : "")
  );
  while( db_step(&q)==SQLITE_ROW ){
    const char *zGoodBad = db_column_text(&q, 1);
    fossil_print("%3d %-7s %s %s%s\n",
        db_column_int(&q, 0),
        zGoodBad,
        db_column_text(&q, 3),
        db_column_text(&q, 2),
        (db_column_int(&q, 4) && zGoodBad[0]!='C') ? " CURRENT" : "");
  }
  db_finalize(&q);
}

/*
** COMMAND: bisect
**
Old version (lines 260-287):
**     Reinitialize a bisect session.  This cancels prior bisect history
**     and allows a bisect session to start over from the beginning.
**
**   fossil bisect vlist|ls|status ?-a|--all?
**
**     List the versions in between "bad" and "good".
**





**   fossil bisect undo
**
**     Undo the most recent "good" or "bad" command.
**
** Summary:
**
**   fossil bisect bad ?VERSION?
**   fossil bisect good ?VERSION?
**   fossil bisect log
**   fossil bisect chart
**   fossil bisect next
**   fossil bisect options
**   fossil bisect reset
**   fossil bisect status

**   fossil bisect undo
*/
void bisect_cmd(void){
  int n;
  const char *zCmd;
  int foundCmd = 0;
  db_must_be_within_tree();






New version (lines 275-308):
**     Reinitialize a bisect session.  This cancels prior bisect history
**     and allows a bisect session to start over from the beginning.
**
**   fossil bisect vlist|ls|status ?-a|--all?
**
**     List the versions in between "bad" and "good".
**
**   fossil bisect ui
**
**     Like "fossil ui" except start on a timeline that shows only the
**     check-ins that are part of the current bisect.
**
**   fossil bisect undo
**
**     Undo the most recent "good" or "bad" command.
**
** Summary:
**
**   fossil bisect bad ?VERSION?
**   fossil bisect good ?VERSION?
**   fossil bisect log
**   fossil bisect chart
**   fossil bisect next
**   fossil bisect options
**   fossil bisect reset
**   fossil bisect status
**   fossil bisect ui
**   fossil bisect undo
*/
void bisect_cmd(void){
  int n;
  const char *zCmd;
  int foundCmd = 0;
  db_must_be_within_tree();
Old version (lines 415-431):
      usage("bisect option ?NAME? ?VALUE?");
    }
  }else if( strncmp(zCmd, "reset", n)==0 ){
    db_multi_exec(
      "DELETE FROM vvar WHERE name IN "
      " ('bisect-good', 'bisect-bad', 'bisect-log')"
    );










  }else if( strncmp(zCmd, "vlist", n)==0
         || strncmp(zCmd, "ls", n)==0
         || strncmp(zCmd, "status", n)==0
  ){
    int fAll = find_option("all", "a", 0)!=0;
    bisect_list(!fAll);
  }else if( !foundCmd ){
    usage("bad|good|log|next|options|reset|status|undo");
  }
}






New version (lines 436-462):
      usage("bisect option ?NAME? ?VALUE?");
    }
  }else if( strncmp(zCmd, "reset", n)==0 ){
    db_multi_exec(
      "DELETE FROM vvar WHERE name IN "
      " ('bisect-good', 'bisect-bad', 'bisect-log')"
    );
  }else if( strcmp(zCmd, "ui")==0 ){
    char *newArgv[8];
    newArgv[0] = g.argv[0];
    newArgv[1] = "ui";
    newArgv[2] = "--page";
    newArgv[3] = "timeline?bisect";
    newArgv[4] = 0;
    g.argv = newArgv;
    g.argc = 4;
    cmd_webserver();
  }else if( strncmp(zCmd, "vlist", n)==0
         || strncmp(zCmd, "ls", n)==0
         || strncmp(zCmd, "status", n)==0
  ){
    int fAll = find_option("all", "a", 0)!=0;
    bisect_list(!fAll);
  }else if( !foundCmd ){
    usage("bad|good|log|next|options|reset|status|ui|undo");
  }
}
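Factoring the history setup out into bisect_create_bilog_table() makes the bisect state available to other code paths as an ordinary TEMP table, which is what the new "bisect ui" subcommand relies on when it starts the web server on timeline?bisect. A minimal sketch of a consumer of that table, built only from calls that appear in the diff; it is illustrative and not part of the check-in:

/* Sketch: print the bisect history by reading the bilog TEMP table
** created by bisect_create_bilog_table().  Illustrative only. */
static void print_bisect_history(void){
  Stmt q;
  bisect_create_bilog_table(db_lget_int("checkout", 0));
  db_prepare(&q, "SELECT seq, stat, rid FROM bilog ORDER BY seq");
  while( db_step(&q)==SQLITE_ROW ){
    fossil_print("%3d %-7s rid=%d\n",
                 db_column_int(&q, 0),
                 db_column_text(&q, 1),
                 db_column_int(&q, 2));
  }
  db_finalize(&q);
}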
Changes to src/blob.c.
Old version (lines 269-282):
  pBlob->xRealloc = blobReallocStatic;
}

/*
** Append text or data to the end of a blob.
*/
void blob_append(Blob *pBlob, const char *aData, int nData){

  blob_is_init(pBlob);
  if( nData<0 ) nData = strlen(aData);
  if( nData==0 ) return;
  if( pBlob->nUsed + nData >= pBlob->nAlloc ){
    pBlob->xRealloc(pBlob, pBlob->nUsed + nData + pBlob->nAlloc + 100);
    if( pBlob->nUsed + nData >= pBlob->nAlloc ){
      blob_panic();






New version (lines 269-283):
  pBlob->xRealloc = blobReallocStatic;
}

/*
** Append text or data to the end of a blob.
*/
void blob_append(Blob *pBlob, const char *aData, int nData){
  assert( aData!=0 || nData==0 );
  blob_is_init(pBlob);
  if( nData<0 ) nData = strlen(aData);
  if( nData==0 ) return;
  if( pBlob->nUsed + nData >= pBlob->nAlloc ){
    pBlob->xRealloc(pBlob, pBlob->nUsed + nData + pBlob->nAlloc + 100);
    if( pBlob->nUsed + nData >= pBlob->nAlloc ){
      blob_panic();
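The added assert documents blob_append()'s contract: aData may be NULL only when nData is 0, because a negative nData is resolved with strlen(aData). A short sketch of calls the assert allows and the one it rejects (hypothetical usage, not from the check-in):

/* Sketch of the blob_append() contract enforced by the new assert. */
static void blob_append_contract_demo(void){
  Blob x;
  blob_zero(&x);
  blob_append(&x, "hello", 5);    /* ok: explicit length */
  blob_append(&x, "world", -1);   /* ok: length computed by strlen() */
  blob_append(&x, 0, 0);          /* ok: NULL data is allowed only with nData==0 */
  /* blob_append(&x, 0, -1);         would trip the assert before strlen(NULL) */
  blob_reset(&x);
}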
Changes to src/browse.c.
Old version (lines 597-619):
    pM = manifest_get_by_name(zCI, &rid);
    if( pM ){
      int trunkRid = symbolic_name_to_rid("tag:trunk", "ci");
      linkTrunk = trunkRid && rid != trunkRid;
      linkTip = rid != symbolic_name_to_rid("tip", "ci");
      zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid);
      rNow = db_double(0.0, "SELECT mtime FROM event WHERE objid=%d", rid);
      zNow = db_text("", "SELECT datetime(mtime,'localtime')"
                         " FROM event WHERE objid=%d", rid);
    }else{
      zCI = 0;
    }
  }
  if( zCI==0 ){
    rNow = db_double(0.0, "SELECT max(mtime) FROM event");
    zNow = db_text("", "SELECT datetime(max(mtime),'localtime') FROM event");
  }

  /* Compute the title of the page */
  blob_zero(&dirname);
  if( zD ){
    blob_append(&dirname, "within directory ", -1);
    hyperlinked_path(zD, &dirname, zCI, "tree", zREx);






New version (lines 597-619):
    pM = manifest_get_by_name(zCI, &rid);
    if( pM ){
      int trunkRid = symbolic_name_to_rid("tag:trunk", "ci");
      linkTrunk = trunkRid && rid != trunkRid;
      linkTip = rid != symbolic_name_to_rid("tip", "ci");
      zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid);
      rNow = db_double(0.0, "SELECT mtime FROM event WHERE objid=%d", rid);
      zNow = db_text("", "SELECT datetime(mtime,toLocal())"
                         " FROM event WHERE objid=%d", rid);
    }else{
      zCI = 0;
    }
  }
  if( zCI==0 ){
    rNow = db_double(0.0, "SELECT max(mtime) FROM event");
    zNow = db_text("", "SELECT datetime(max(mtime),toLocal()) FROM event");
  }

  /* Compute the title of the page */
  blob_zero(&dirname);
  if( zD ){
    blob_append(&dirname, "within directory ", -1);
    hyperlinked_path(zD, &dirname, zCI, "tree", zREx);
Old version (lines 1027-1041):
  if( zName==0 ) zName = "tip";
  rid = symbolic_name_to_rid(zName, "ci");
  if( rid==0 ){
    fossil_fatal("not a valid check-in: %s", zName);
  }
  zUuid = db_text("", "SELECT uuid FROM blob WHERE rid=%d", rid);
  baseTime = db_double(0.0,"SELECT mtime FROM event WHERE objid=%d", rid);
  zNow = db_text("", "SELECT datetime(mtime,'localtime') FROM event"
                     " WHERE objid=%d", rid);
  style_submenu_element("Tree-View", "Tree-View",
                        "%R/tree?ci=%T&mtime=1&type=tree",
                        zName);
  style_header("File Ages");
  zGlob = P("glob");
  compute_fileage(rid,zGlob);






New version (lines 1027-1041):
  if( zName==0 ) zName = "tip";
  rid = symbolic_name_to_rid(zName, "ci");
  if( rid==0 ){
    fossil_fatal("not a valid check-in: %s", zName);
  }
  zUuid = db_text("", "SELECT uuid FROM blob WHERE rid=%d", rid);
  baseTime = db_double(0.0,"SELECT mtime FROM event WHERE objid=%d", rid);
  zNow = db_text("", "SELECT datetime(mtime,toLocal()) FROM event"
                     " WHERE objid=%d", rid);
  style_submenu_element("Tree-View", "Tree-View",
                        "%R/tree?ci=%T&mtime=1&type=tree",
                        zName);
  style_header("File Ages");
  zGlob = P("glob");
  compute_fileage(rid,zGlob);
Changes to src/cache.c.
Old version (lines 250-264):
**
**    list|ls      List the keys and content sizes and other stats for
**                 all entries currently in the cache
**
**    status       Show a summary of cache status.
**
** The cache is stored in a file that is distinct from the repository
** but that is held in the same directory as the repository.  To cache
** file can be deleted in order to completely disable the cache.
*/
void cache_cmd(void){
  const char *zCmd;
  int nCmd;
  sqlite3 *db;
  sqlite3_stmt *pStmt;






New version (lines 250-264):
**
**    list|ls      List the keys and content sizes and other stats for
**                 all entries currently in the cache
**
**    status       Show a summary of cache status.
**
** The cache is stored in a file that is distinct from the repository
** but that is held in the same directory as the repository.  The cache
** file can be deleted in order to completely disable the cache.
*/
void cache_cmd(void){
  const char *zCmd;
  int nCmd;
  sqlite3 *db;
  sqlite3_stmt *pStmt;
Changes to src/cgi.c.
Old version (lines 794-807):
*/
void cgi_parse_POST_JSON( FILE * zIn, unsigned int contentLen ){
  cson_value * jv = NULL;
  int rc;
  CgiPostReadState state;
  cson_parse_opt popt = cson_parse_opt_empty;
  cson_parse_info pinfo = cson_parse_info_empty;

  popt.maxDepth = 15;
  state.fh = zIn;
  state.len = contentLen;
  state.pos = 0;
  rc = cson_parse( &jv,
                   contentLen ? cson_data_source_FILE_n : cson_data_source_FILE,
                   contentLen ? (void *)&state : (void *)zIn, &popt, &pinfo );






New version (lines 794-808):
*/
void cgi_parse_POST_JSON( FILE * zIn, unsigned int contentLen ){
  cson_value * jv = NULL;
  int rc;
  CgiPostReadState state;
  cson_parse_opt popt = cson_parse_opt_empty;
  cson_parse_info pinfo = cson_parse_info_empty;
  assert(g.json.gc.a && "json_main_bootstrap() was not called!");
  popt.maxDepth = 15;
  state.fh = zIn;
  state.len = contentLen;
  state.pos = 0;
  rc = cson_parse( &jv,
                   contentLen ? cson_data_source_FILE_n : cson_data_source_FILE,
                   contentLen ? (void *)&state : (void *)zIn, &popt, &pinfo );
Old version (lines 1048-1062):
    }
  }

  /* If no match is found and the name begins with an upper-case
  ** letter, then check to see if there is an environment variable
  ** with the given name.
  */
  if( fossil_isupper(zName[0]) ){
    const char *zValue = fossil_getenv(zName);
    if( zValue ){
      cgi_set_parameter_nocopy(zName, zValue, 0);
      CGIDEBUG(("env-match [%s] = [%s]\n", zName, zValue));
      return zValue;
    }
  }






New version (lines 1049-1063):
    }
  }

  /* If no match is found and the name begins with an upper-case
  ** letter, then check to see if there is an environment variable
  ** with the given name.
  */
  if( zName && fossil_isupper(zName[0]) ){
    const char *zValue = fossil_getenv(zName);
    if( zValue ){
      cgi_set_parameter_nocopy(zName, zValue, 0);
      CGIDEBUG(("env-match [%s] = [%s]\n", zName, zValue));
      return zValue;
    }
  }
Changes to src/checkin.c.
Old version (lines 1-14):
/*
** Copyright (c) 2007 D. Richard Hipp
**
** This program is free software; you can redistribute it and/or
** modify it under the terms of the Simplified BSD License (also
** known as the "2-Clause License" or "FreeBSD License".)

** This program is distributed in the hope that it will be useful,
** but without any warranty; without even the implied warranty of
** merchantability or fitness for a particular purpose.
**
** Author contact information:
**   drh@hwaci.com
**   http://www.hwaci.com/drh/





New version (lines 1-14):
/*
** Copyright (c) 2007 D. Richard Hipp
**
** This program is free software; you can redistribute it and/or
** modify it under the terms of the Simplified BSD License (also
** known as the "2-Clause License" or "FreeBSD License".)
**
** This program is distributed in the hope that it will be useful,
** but without any warranty; without even the implied warranty of
** merchantability or fitness for a particular purpose.
**
** Author contact information:
**   drh@hwaci.com
**   http://www.hwaci.com/drh/
Old version (lines 60-88):
      (blob_size(&where)>0) ? "OR" : "AND", zName,
      filename_collation(), zName, filename_collation(),
      zName, filename_collation()
    );
  }

  db_prepare(&q,
    "SELECT pathname, deleted, chnged, rid, coalesce(origname!=pathname,0)"

    "  FROM vfile "
    " WHERE is_selected(id) %s"
    "   AND (chnged OR deleted OR rid=0 OR pathname!=origname)"
    " ORDER BY 1 /*scan*/",
    blob_sql_text(&where)
  );
  blob_zero(&rewrittenPathname);
  while( db_step(&q)==SQLITE_ROW ){
    const char *zPathname = db_column_text(&q,0);
    const char *zDisplayName = zPathname;
    int isDeleted = db_column_int(&q, 1);
    int isChnged = db_column_int(&q,2);
    int isNew = db_column_int(&q,3)==0;
    int isRenamed = db_column_int(&q,4);

    char *zFullName = mprintf("%s%s", g.zLocalRoot, zPathname);
    if( cwdRelative ){
      file_relative_name(zFullName, &rewrittenPathname, 0);
      zDisplayName = blob_str(&rewrittenPathname);
      if( zDisplayName[0]=='.' && zDisplayName[1]=='/' ){
        zDisplayName += 2;  /* no unnecessary ./ prefix */
      }






New version (lines 60-90):
      (blob_size(&where)>0) ? "OR" : "AND", zName,
      filename_collation(), zName, filename_collation(),
      zName, filename_collation()
    );
  }

  db_prepare(&q,
    "SELECT pathname, deleted, chnged,"
    "       rid, coalesce(origname!=pathname,0), islink"
    "  FROM vfile "
    " WHERE is_selected(id) %s"
    "   AND (chnged OR deleted OR rid=0 OR pathname!=origname)"
    " ORDER BY 1 /*scan*/",
    blob_sql_text(&where)
  );
  blob_zero(&rewrittenPathname);
  while( db_step(&q)==SQLITE_ROW ){
    const char *zPathname = db_column_text(&q,0);
    const char *zDisplayName = zPathname;
    int isDeleted = db_column_int(&q, 1);
    int isChnged = db_column_int(&q,2);
    int isNew = db_column_int(&q,3)==0;
    int isRenamed = db_column_int(&q,4);
    int isLink = db_column_int(&q,5);
    char *zFullName = mprintf("%s%s", g.zLocalRoot, zPathname);
    if( cwdRelative ){
      file_relative_name(zFullName, &rewrittenPathname, 0);
      zDisplayName = blob_str(&rewrittenPathname);
      if( zDisplayName[0]=='.' && zDisplayName[1]=='/' ){
        zDisplayName += 2;  /* no unnecessary ./ prefix */
      }

Before (lines 119-133):
        blob_appendf(report, "EXECUTABLE %s\n", zDisplayName);
      }else if( isChnged==7 ){
        blob_appendf(report, "SYMLINK    %s\n", zDisplayName);
      }else if( isChnged==8 ){
        blob_appendf(report, "UNEXEC     %s\n", zDisplayName);
      }else if( isChnged==9 ){
        blob_appendf(report, "UNLINK     %s\n", zDisplayName);
      }else if( file_contains_merge_marker(zFullName) ){
        blob_appendf(report, "CONFLICT   %s\n", zDisplayName);
      }else{
        blob_appendf(report, "EDITED     %s\n", zDisplayName);
      }
    }else if( isRenamed ){
      blob_appendf(report, "RENAMED    %s\n", zDisplayName);
    }else{






After (lines 121-135):
        blob_appendf(report, "EXECUTABLE %s\n", zDisplayName);
      }else if( isChnged==7 ){
        blob_appendf(report, "SYMLINK    %s\n", zDisplayName);
      }else if( isChnged==8 ){
        blob_appendf(report, "UNEXEC     %s\n", zDisplayName);
      }else if( isChnged==9 ){
        blob_appendf(report, "UNLINK     %s\n", zDisplayName);
      }else if( !isLink && file_contains_merge_marker(zFullName) ){
        blob_appendf(report, "CONFLICT   %s\n", zDisplayName);
      }else{
        blob_appendf(report, "EDITED     %s\n", zDisplayName);
      }
    }else if( isRenamed ){
      blob_appendf(report, "RENAMED    %s\n", zDisplayName);
    }else{

Before (lines 323-337):
  if( timeOrder ){
    zOrderBy = "mtime DESC";
  }

  compute_fileage(rid,0);
  db_prepare(&q,
    "SELECT datetime(fileage.mtime, 'localtime'), fileage.pathname,\n"
    "       blob.size\n"
    "  FROM fileage, blob\n"
    " WHERE blob.rid=fileage.fid %s\n"
    " ORDER BY %s;", blob_sql_text(&where), zOrderBy /*safe-for-%s*/
  );
  blob_reset(&where);







After (lines 325-339):
  if( timeOrder ){
    zOrderBy = "mtime DESC";
  }

  compute_fileage(rid,0);
  db_prepare(&q,
    "SELECT datetime(fileage.mtime, toLocal()), fileage.pathname,\n"
    "       blob.size\n"
    "  FROM fileage, blob\n"
    " WHERE blob.rid=fileage.fid %s\n"
    " ORDER BY %s;", blob_sql_text(&where), zOrderBy /*safe-for-%s*/
  );
  blob_reset(&where);


Before (lines 392-405):
  timeOrder = find_option("t","t",0)!=0;

  if( zRev!=0 ){
    db_find_and_open_repository(0, 0);
    verify_all_options();
    ls_cmd_rev(zRev,verboseFlag,showAge,timeOrder);
    return;


  }

  db_must_be_within_tree();
  vid = db_lget_int("checkout", 0);
  if( timeOrder ){
    if( showAge ){
      zOrderBy = mprintf("checkin_mtime(%d,rid) DESC", vid);






After (lines 394-409):
  timeOrder = find_option("t","t",0)!=0;

  if( zRev!=0 ){
    db_find_and_open_repository(0, 0);
    verify_all_options();
    ls_cmd_rev(zRev,verboseFlag,showAge,timeOrder);
    return;
  }else if( find_option("R",0,1)!=0 ){
    fossil_fatal("the -r is required in addition to -R");
  }

  db_must_be_within_tree();
  vid = db_lget_int("checkout", 0);
  if( timeOrder ){
    if( showAge ){
      zOrderBy = mprintf("checkin_mtime(%d,rid) DESC", vid);

Before (lines 425-457):
       zName, filename_collation()
    );
  }
  vfile_check_signature(vid, 0);
  if( showAge ){
    db_prepare(&q,
       "SELECT pathname, deleted, rid, chnged, coalesce(origname!=pathname,0),"
       "       datetime(checkin_mtime(%d,rid),'unixepoch'%s)"
       "  FROM vfile %s"
       " ORDER BY %s",
       vid, timeline_utc(), blob_sql_text(&where), zOrderBy /*safe-for-%s*/
    );
  }else{
    db_prepare(&q,
       "SELECT pathname, deleted, rid, chnged, coalesce(origname!=pathname,0)"

       "  FROM vfile %s"
       " ORDER BY %s", blob_sql_text(&where), zOrderBy /*safe-for-%s*/
    );
  }
  blob_reset(&where);
  while( db_step(&q)==SQLITE_ROW ){
    const char *zPathname = db_column_text(&q,0);
    int isDeleted = db_column_int(&q, 1);
    int isNew = db_column_int(&q,2)==0;
    int chnged = db_column_int(&q,3);
    int renamed = db_column_int(&q,4);

    char *zFullName = mprintf("%s%s", g.zLocalRoot, zPathname);
    const char *type = "";
    if( verboseFlag ){
      if( isNew ){
        type = "ADDED      ";
      }else if( isDeleted ){
        type = "DELETED    ";






After (lines 429-463):
       zName, filename_collation()
    );
  }
  vfile_check_signature(vid, 0);
  if( showAge ){
    db_prepare(&q,
       "SELECT pathname, deleted, rid, chnged, coalesce(origname!=pathname,0),"
       "       datetime(checkin_mtime(%d,rid),'unixepoch',toLocal())"
       "  FROM vfile %s"
       " ORDER BY %s",
       vid, blob_sql_text(&where), zOrderBy /*safe-for-%s*/
    );
  }else{
    db_prepare(&q,
       "SELECT pathname, deleted, rid, chnged,"
       "       coalesce(origname!=pathname,0), islink"
       "  FROM vfile %s"
       " ORDER BY %s", blob_sql_text(&where), zOrderBy /*safe-for-%s*/
    );
  }
  blob_reset(&where);
  while( db_step(&q)==SQLITE_ROW ){
    const char *zPathname = db_column_text(&q,0);
    int isDeleted = db_column_int(&q, 1);
    int isNew = db_column_int(&q,2)==0;
    int chnged = db_column_int(&q,3);
    int renamed = db_column_int(&q,4);
    int isLink = db_column_int(&q,5);
    char *zFullName = mprintf("%s%s", g.zLocalRoot, zPathname);
    const char *type = "";
    if( verboseFlag ){
      if( isNew ){
        type = "ADDED      ";
      }else if( isDeleted ){
        type = "DELETED    ";

Before (lines 466-480):
          type = "UPDATED_BY_MERGE ";
        }else if( chnged==3 ){
          type = "ADDED_BY_MERGE ";
        }else if( chnged==4 ){
          type = "UPDATED_BY_INTEGRATE ";
        }else if( chnged==5 ){
          type = "ADDED_BY_INTEGRATE ";
        }else if( file_contains_merge_marker(zFullName) ){
          type = "CONFLICT   ";
        }else{
          type = "EDITED     ";
        }
      }else if( renamed ){
        type = "RENAMED    ";
      }else{






After (lines 472-486):
          type = "UPDATED_BY_MERGE ";
        }else if( chnged==3 ){
          type = "ADDED_BY_MERGE ";
        }else if( chnged==4 ){
          type = "UPDATED_BY_INTEGRATE ";
        }else if( chnged==5 ){
          type = "ADDED_BY_INTEGRATE ";
        }else if( !isLink && file_contains_merge_marker(zFullName) ){
          type = "CONFLICT   ";
        }else{
          type = "EDITED     ";
        }
      }else if( renamed ){
        type = "RENAMED    ";
      }else{

Before (lines 668-681):
**                     explicitly exempted via the empty-dirs setting
**                     or another applicable setting or command line
**                     argument.  Matching files, if any, are removed
**                     prior to checking for any empty directories;
**                     therefore, directories that contain only files
**                     that were removed will be removed as well.
**    -f|--force       Remove files without prompting.

**    -x|--verily      WARNING: Removes everything that is not a managed
**                     file or the repository itself.  This option
**                     implies the --force, --emptydirs, --dotfiles, and
**                     --disable-undo options.  Furthermore, it completely
**                     disregards the keep-glob and ignore-glob settings.
**                     However, it does honor the --ignore and --keep
**                     options.






After (lines 674-688):
**                     explicitly exempted via the empty-dirs setting
**                     or another applicable setting or command line
**                     argument.  Matching files, if any, are removed
**                     prior to checking for any empty directories;
**                     therefore, directories that contain only files
**                     that were removed will be removed as well.
**    -f|--force       Remove files without prompting.
**    -i|--prompt      Prompt before removing each file.
**    -x|--verily      WARNING: Removes everything that is not a managed
**                     file or the repository itself.  This option
**                     implies the --force, --emptydirs, --dotfiles, and
**                     --disable-undo options.  Furthermore, it completely
**                     disregards the keep-glob and ignore-glob settings.
**                     However, it does honor the --ignore and --keep
**                     options.

Before (lines 696-729):
**
** See also: addremove, extras, status
*/
void clean_cmd(void){
  int allFileFlag, allDirFlag, dryRunFlag, verboseFlag;
  int emptyDirsFlag, dirsOnlyFlag;
  int disableUndo, noPrompt;

  unsigned scanFlags = 0;
  int verilyFlag = 0;
  const char *zIgnoreFlag, *zKeepFlag, *zCleanFlag;
  Glob *pIgnore, *pKeep, *pClean;
  int nRoot;

#ifndef UNDO_SIZE_LIMIT  /* TODO: Setting? */
#define UNDO_SIZE_LIMIT  (10*1024*1024) /* 10MiB */
#endif

  undo_capture_command_line();
  dryRunFlag = find_option("dry-run","n",0)!=0;
  if( !dryRunFlag ){
    dryRunFlag = find_option("test",0,0)!=0; /* deprecated */
  }
  if( !dryRunFlag ){
    dryRunFlag = find_option("whatif",0,0)!=0;
  }
  disableUndo = find_option("disable-undo",0,0)!=0;
  noPrompt = find_option("no-prompt",0,0)!=0;

  allFileFlag = allDirFlag = find_option("force","f",0)!=0;
  dirsOnlyFlag = find_option("dirsonly",0,0)!=0;
  emptyDirsFlag = find_option("emptydirs","d",0)!=0 || dirsOnlyFlag;
  if( find_option("dotfiles",0,0)!=0 ) scanFlags |= SCAN_ALL;
  if( find_option("temp",0,0)!=0 ) scanFlags |= SCAN_TEMP;
  if( find_option("allckouts",0,0)!=0 ) scanFlags |= SCAN_NESTED;
  zIgnoreFlag = find_option("ignore",0,1);






After (lines 703-738):
**
** See also: addremove, extras, status
*/
void clean_cmd(void){
  int allFileFlag, allDirFlag, dryRunFlag, verboseFlag;
  int emptyDirsFlag, dirsOnlyFlag;
  int disableUndo, noPrompt;
  int alwaysPrompt = 0;
  unsigned scanFlags = 0;
  int verilyFlag = 0;
  const char *zIgnoreFlag, *zKeepFlag, *zCleanFlag;
  Glob *pIgnore, *pKeep, *pClean;
  int nRoot;

#ifndef UNDO_SIZE_LIMIT  /* TODO: Setting? */
#define UNDO_SIZE_LIMIT  (10*1024*1024) /* 10MiB */
#endif

  undo_capture_command_line();
  dryRunFlag = find_option("dry-run","n",0)!=0;
  if( !dryRunFlag ){
    dryRunFlag = find_option("test",0,0)!=0; /* deprecated */
  }
  if( !dryRunFlag ){
    dryRunFlag = find_option("whatif",0,0)!=0;
  }
  disableUndo = find_option("disable-undo",0,0)!=0;
  noPrompt = find_option("no-prompt",0,0)!=0;
  alwaysPrompt = find_option("prompt","i",0)!=0;
  allFileFlag = allDirFlag = find_option("force","f",0)!=0;
  dirsOnlyFlag = find_option("dirsonly",0,0)!=0;
  emptyDirsFlag = find_option("emptydirs","d",0)!=0 || dirsOnlyFlag;
  if( find_option("dotfiles",0,0)!=0 ) scanFlags |= SCAN_ALL;
  if( find_option("temp",0,0)!=0 ) scanFlags |= SCAN_TEMP;
  if( find_option("allckouts",0,0)!=0 ) scanFlags |= SCAN_NESTED;
  zIgnoreFlag = find_option("ignore",0,1);

Before (lines 775-821):
        if( verboseFlag ){
          fossil_print("KEPT file \"%s\" not removed (due to --keep"
                       " or \"keep-glob\")\n", zName+nRoot);
        }
        continue;
      }
      if( !dryRunFlag && !glob_match(pClean, zName+nRoot) ){



        int undoRc = UNDO_NONE;
        if( !disableUndo ){
          undoRc = undo_maybe_save(zName+nRoot, UNDO_SIZE_LIMIT);
        }
        if( undoRc!=UNDO_SAVED_OK ){
          char cReply;
          if( allFileFlag ){
            cReply = 'Y';
          }else if( !noPrompt ){
            Blob ans;
            char *prompt = mprintf("\nWARNING: Deletion of this file will "
                                   "not be undoable via the 'undo'\n"
                                   "         command because %s.\n\n"
                                   "Remove unmanaged file \"%s\" (a=all/y/N)? ",
                                   undo_save_message(undoRc), zName+nRoot);
            prompt_user(prompt, &ans);

            cReply = blob_str(&ans)[0];
            blob_reset(&ans);
          }else{
            cReply = 'N';
          }
          if( cReply=='a' || cReply=='A' ){
            allFileFlag = 1;
          }else if( cReply!='y' && cReply!='Y' ){
            continue;
          }
        }
      }
      if( dryRunFlag || file_delete(zName)==0 ){
        if( verboseFlag || dryRunFlag ){
          fossil_print("Removed unmanaged file: %s\n", zName+nRoot);
        }
      }else if( verboseFlag ){
        fossil_print("Could not remove file: %s\n", zName+nRoot);
      }
    }
    db_finalize(&q);
    if( !dryRunFlag && !disableUndo ) undo_finish();
  }
  if( emptyDirsFlag ){






After (lines 784-847):
        if( verboseFlag ){
          fossil_print("KEPT file \"%s\" not removed (due to --keep"
                       " or \"keep-glob\")\n", zName+nRoot);
        }
        continue;
      }
      if( !dryRunFlag && !glob_match(pClean, zName+nRoot) ){
        char *zPrompt = 0;
        char cReply;
        Blob ans = empty_blob;
        int undoRc = UNDO_NONE;
        if( alwaysPrompt ){
          zPrompt = mprintf("Remove unmanaged file \"%s\" (a=all/y/N)? ",
                            zName+nRoot);
          prompt_user(zPrompt, &ans);
          fossil_free(zPrompt);
          cReply = fossil_toupper(blob_str(&ans)[0]);
          blob_reset(&ans);
          if( cReply=='N' ) continue;
          if( cReply=='A' ){
            allFileFlag = 1;
            alwaysPrompt = 0;
          }else{
            undoRc = UNDO_SAVED_OK;
          }
        }else if( !disableUndo ){
          undoRc = undo_maybe_save(zName+nRoot, UNDO_SIZE_LIMIT);
        }
        if( undoRc!=UNDO_SAVED_OK ){

          if( allFileFlag ){
            cReply = 'Y';
          }else if( !noPrompt ){
            Blob ans;
            zPrompt = mprintf("\nWARNING: Deletion of this file will "
                              "not be undoable via the 'undo'\n"
                              "         command because %s.\n\n"
                              "Remove unmanaged file \"%s\" (a=all/y/N)? ",
                              undo_save_message(undoRc), zName+nRoot);
            prompt_user(zPrompt, &ans);
            fossil_free(zPrompt);
            cReply = blob_str(&ans)[0];
            blob_reset(&ans);
          }else{
            cReply = 'N';
          }
          if( cReply=='a' || cReply=='A' ){
            allFileFlag = 1;
          }else if( cReply!='y' && cReply!='Y' ){
            continue;
          }
        }
      }
      if( dryRunFlag || file_delete(zName)==0 ){
        if( verboseFlag || dryRunFlag ){
          fossil_print("Removed unmanaged file: %s\n", zName+nRoot);
        }
      }else{
        fossil_print("Could not remove file: %s\n", zName+nRoot);
      }
    }
    db_finalize(&q);
    if( !dryRunFlag && !disableUndo ) undo_finish();
  }
  if( emptyDirsFlag ){

Before (lines 845-858):
        char cReply;
        if( !noPrompt ){
          Blob ans;
          char *prompt = mprintf("Remove empty directory \"%s\" (a=all/y/N)? ",
                                 zName+nRoot);
          prompt_user(prompt, &ans);
          cReply = blob_str(&ans)[0];

          blob_reset(&ans);
        }else{
          cReply = 'N';
        }
        if( cReply=='a' || cReply=='A' ){
          allDirFlag = 1;
        }else if( cReply!='y' && cReply!='Y' ){






After (lines 871-885):
        char cReply;
        if( !noPrompt ){
          Blob ans;
          char *prompt = mprintf("Remove empty directory \"%s\" (a=all/y/N)? ",
                                 zName+nRoot);
          prompt_user(prompt, &ans);
          cReply = blob_str(&ans)[0];
          fossil_free(prompt);
          blob_reset(&ans);
        }else{
          cReply = 'N';
        }
        if( cReply=='a' || cReply=='A' ){
          allDirFlag = 1;
        }else if( cReply!='y' && cReply!='Y' ){

Before (lines 899-913):
  if( zEditor==0 ){
    zEditor = fossil_getenv("EDITOR");
  }
#if defined(_WIN32) || defined(__CYGWIN__)
  if( zEditor==0 ){
    zEditor = mprintf("%s\\notepad.exe", fossil_getenv("SYSTEMROOT"));
#if defined(__CYGWIN__)
    zEditor = fossil_utf8_to_filename(zEditor);
    blob_add_cr(pPrompt);
#endif
  }
#endif
  if( zEditor==0 ){
    blob_append(pPrompt,
       "#\n"






After (lines 926-940):
  if( zEditor==0 ){
    zEditor = fossil_getenv("EDITOR");
  }
#if defined(_WIN32) || defined(__CYGWIN__)
  if( zEditor==0 ){
    zEditor = mprintf("%s\\notepad.exe", fossil_getenv("SYSTEMROOT"));
#if defined(__CYGWIN__)
    zEditor = fossil_utf8_to_path(zEditor, 0);
    blob_add_cr(pPrompt);
#endif
  }
#endif
  if( zEditor==0 ){
    blob_append(pPrompt,
       "#\n"

Before (lines 1648-1661):
**    -n|--dry-run               If given, display instead of run actions
**    --no-warnings              omit all warnings about file contents
**    --nosign                   do not attempt to sign this commit with gpg
**    --private                  do not sync changes and their descendants
**    --sha1sum                  verify file status using SHA1 hashing rather
**                               than relying on file mtimes
**    --tag TAG-NAME             assign given tag TAG-NAME to the check-in


**
** See also: branch, changes, checkout, extras, sync
*/
void commit_cmd(void){
  int hasChanges;        /* True if unsaved changes exist */
  int vid;               /* blob-id of parent version */
  int nrid;              /* blob-id of a modified file */






After (lines 1675-1690):
**    -n|--dry-run               If given, display instead of run actions
**    --no-warnings              omit all warnings about file contents
**    --nosign                   do not attempt to sign this commit with gpg
**    --private                  do not sync changes and their descendants
**    --sha1sum                  verify file status using SHA1 hashing rather
**                               than relying on file mtimes
**    --tag TAG-NAME             assign given tag TAG-NAME to the check-in
**    --date-override DATE       DATE to use instead of 'now'
**    --user-override USER       USER to use instead of the current default
**
** See also: branch, changes, checkout, extras, sync
*/
void commit_cmd(void){
  int hasChanges;        /* True if unsaved changes exist */
  int vid;               /* blob-id of parent version */
  int nrid;              /* blob-id of a modified file */
Changes to src/checkout.c.

Before (lines 241-255):
    return;
  }
  if( !keepFlag ){
    uncheckout(prior);
  }
  db_multi_exec("DELETE FROM vfile WHERE vid!=%d", vid);
  if( !keepFlag ){
    vfile_to_disk(vid, 0, 1, promptFlag);
  }
  checkout_set_all_exe(vid);
  manifest_to_disk(vid);
  ensure_empty_dirs_created();
  db_lset_int("checkout", vid);
  undo_reset();
  db_multi_exec("DELETE FROM vmerge");






After (lines 241-255):
    return;
  }
  if( !keepFlag ){
    uncheckout(prior);
  }
  db_multi_exec("DELETE FROM vfile WHERE vid!=%d", vid);
  if( !keepFlag ){
    vfile_to_disk(vid, 0, !g.fQuiet, promptFlag);
  }
  checkout_set_all_exe(vid);
  manifest_to_disk(vid);
  ensure_empty_dirs_created();
  db_lset_int("checkout", vid);
  undo_reset();
  db_multi_exec("DELETE FROM vmerge");
Changes to src/clone.c.

Before (lines 78-121):
  );
}


/*
** COMMAND: clone
**
** Usage: %fossil clone ?OPTIONS? URL FILENAME
**
** Make a clone of a repository specified by URL in the local
** file named FILENAME.
**
** URL must be in one of the following form: ([...] mean optional)
**   HTTP/HTTPS protocol:
**     http[s]://[userid[:password]@]host[:port][/path]
**
**   SSH protocol:
**     ssh://[userid[:password]@]host[:port]/path/to/repo.fossil\\
**     [?fossil=path/to/fossil.exe]
**
**   Filesystem:
**     [file://]path/to/repo.fossil
**
**   Note: For ssh and filesystem, path must have an extra leading
**         '/' to use an absolute path.




**
** By default, your current login name is used to create the default
** admin user. This can be overridden using the -A|--admin-user
** parameter.
**
** Options:
**    --admin-user|-A USERNAME   Make USERNAME the administrator
**    --once                     Don't save url.
**    --private                  Also clone private branches
**    --ssl-identity=filename    Use the SSL identity if requested by the server
**    --ssh-command|-c 'command' Use this SSH command
**    --httpauth|-B 'user:pass'  Add HTTP Basic Authorization to requests
**    --verbose                  Show more statistics in output
**
** See also: init
*/
void clone_cmd(void){
  char *zPassword;
  const char *zDefaultUser;   /* Optional name of the default user */






After (lines 78-125):
  );
}


/*
** COMMAND: clone
**
** Usage: %fossil clone ?OPTIONS? URI FILENAME
**
** Make a clone of a repository specified by URI in the local
** file named FILENAME.
**
** URI may be one of the following form: ([...] mean optional)
**   HTTP/HTTPS protocol:
**     http[s]://[userid[:password]@]host[:port][/path]
**
**   SSH protocol:
**     ssh://[userid@]host[:port]/path/to/repo.fossil\\
**     [?fossil=path/to/fossil.exe]
**
**   Filesystem:
**     [file://]path/to/repo.fossil
**
** Note 1: For ssh and filesystem, path must have an extra leading
**         '/' to use an absolute path.
**
** Note 2: Use %HH escapes for special characters in the userid and 
**         password.  For example "%40" in place of "@", "%2f" in place
**         of "/", and "%3a" in place of ":".
**
** By default, your current login name is used to create the default
** admin user. This can be overridden using the -A|--admin-user
** parameter.
**
** Options:
**    --admin-user|-A USERNAME   Make USERNAME the administrator
**    --once                     Don't remember the URI.
**    --private                  Also clone private branches
**    --ssl-identity FILENAME    Use the SSL identity if requested by the server
**    --ssh-command|-c SSH       Use SSH as the "ssh" command
**    --httpauth|-B USER:PASS    Add HTTP Basic Authorization to requests
**    --verbose                  Show more statistics in output
**
** See also: init
*/
void clone_cmd(void){
  char *zPassword;
  const char *zDefaultUser;   /* Optional name of the default user */
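
The revised help text above says that special characters in a userid or password must be written as %HH escapes ("%40" for "@", "%2f" for "/", "%3a" for ":").  As an illustration only -- this helper is not part of the check-in or of Fossil -- a small standalone C routine that applies those escapes could look like this:

    #include <stdio.h>

    /* Illustration only: %HH-escape the characters that the clone help
    ** text calls out, plus '%' itself so the result decodes unambiguously. */
    static void escape_userinfo(const char *zIn, char *zOut){
      static const char zHex[] = "0123456789abcdef";
      while( *zIn ){
        unsigned char c = (unsigned char)*zIn++;
        if( c=='@' || c=='/' || c==':' || c=='%' ){
          *zOut++ = '%';
          *zOut++ = zHex[(c>>4)&0xf];
          *zOut++ = zHex[c&0xf];
        }else{
          *zOut++ = (char)c;
        }
      }
      *zOut = 0;
    }

    int main(void){
      char zBuf[200];
      escape_userinfo("dev@example.com", zBuf);
      printf("%s\n", zBuf);   /* prints: dev%40example.com */
      return 0;
    }

So a userid of dev@example.com would be written as dev%40example.com inside the clone URI.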

Before (lines 135-149):
  /* We should be done with options.. */
  verify_all_options();

  if( g.argc < 4 ){
    usage("?OPTIONS? FILE-OR-URL NEW-REPOSITORY");
  }
  db_open_config(0);
  if( -1 != file_size(g.argv[3]) ){
    fossil_fatal("file already exists: %s", g.argv[3]);
  }

  url_parse(g.argv[2], urlFlags);
  if( zDefaultUser==0 && g.url.user!=0 ) zDefaultUser = g.url.user;
  if( g.url.isFile ){






After (lines 139-153):
  /* We should be done with options.. */
  verify_all_options();

  if( g.argc < 4 ){
    usage("?OPTIONS? FILE-OR-URL NEW-REPOSITORY");
  }
  db_open_config(0, 0);
  if( -1 != file_size(g.argv[3]) ){
    fossil_fatal("file already exists: %s", g.argv[3]);
  }

  url_parse(g.argv[2], urlFlags);
  if( zDefaultUser==0 && g.url.user!=0 ) zDefaultUser = g.url.user;
  if( g.url.isFile ){
Changes to src/codecheck1.c.

Before (lines 248-262):
/*
** A list of functions that return strings that are safe to insert into
** SQL using %s.
*/
static const char *azSafeFunc[] = {
  "filename_collation",
  "db_name",
  "timeline_utc",
  "leaf_is_closed_sql",
  "timeline_query_for_www",
  "timeline_query_for_tty",
  "blob_sql_text",
  "glob_expr",
  "fossil_all_reserved_names",
  "configure_inop_rhs",






After (lines 248-261):
/*
** A list of functions that return strings that are safe to insert into
** SQL using %s.
*/
static const char *azSafeFunc[] = {
  "filename_collation",
  "db_name",

  "leaf_is_closed_sql",
  "timeline_query_for_www",
  "timeline_query_for_tty",
  "blob_sql_text",
  "glob_expr",
  "fossil_all_reserved_names",
  "configure_inop_rhs",
Changes to src/configure.c.

Before (lines 880-898):
**    -R|--repository FILE       Extract info from repository FILE
**
** See also: settings, unset
*/
void configuration_cmd(void){
  int n;
  const char *zMethod;


  if( g.argc<3 ){
    usage("export|import|merge|pull|reset ...");
  }
  db_find_and_open_repository(0, 0);
  db_open_config(0);
  zMethod = g.argv[2];
  n = strlen(zMethod);
  if( strncmp(zMethod, "export", n)==0 ){
    int mask;
    const char *zSince = find_option("since",0,1);
    sqlite3_int64 iStart;
    if( g.argc!=5 ){






After (lines 880-898):
**    -R|--repository FILE       Extract info from repository FILE
**
** See also: settings, unset
*/
void configuration_cmd(void){
  int n;
  const char *zMethod;
  db_find_and_open_repository(0, 0);
  db_open_config(0, 0);
  if( g.argc<3 ){
    usage("SUBCOMMAND ...");
  }


  zMethod = g.argv[2];
  n = strlen(zMethod);
  if( strncmp(zMethod, "export", n)==0 ){
    int mask;
    const char *zSince = find_option("since",0,1);
    sqlite3_int64 iStart;
    if( g.argc!=5 ){
Changes to src/content.c.

Before (lines 707-721):
/*
** COMMAND:  test-content-undelta
**
** Make sure the content at RECORDID is not a delta
*/
void test_content_undelta_cmd(void){
  int rid;
  if( g.argc!=2 ) usage("RECORDID");
  db_must_be_within_tree();
  rid = atoi(g.argv[2]);
  content_undelta(rid);
}

/*
** Return true if the given RID is marked as PRIVATE.






After (lines 707-721):
/*
** COMMAND:  test-content-undelta
**
** Make sure the content at RECORDID is not a delta
*/
void test_content_undelta_cmd(void){
  int rid;
  if( g.argc!=3 ) usage("RECORDID");
  db_must_be_within_tree();
  rid = atoi(g.argv[2]);
  content_undelta(rid);
}

/*
** Return true if the given RID is marked as PRIVATE.
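
The fix above changes the argument check from g.argc!=2 to g.argc!=3: g.argv[0] is the fossil binary, g.argv[1] is the command name, and the RECORDID read from g.argv[2] is the third entry, so a command taking exactly one argument sees g.argc==3.  A throwaway sketch (not part of the check-in) showing that counting:

    #include <stdio.h>

    /* Illustration only: invoked as "fossil test-content-undelta 1234",
    ** the command sees three argv entries, which is why the corrected
    ** test is argc!=3 rather than argc!=2. */
    int main(int argc, char **argv){
      int i;
      printf("argc=%d\n", argc);
      for(i=0; i<argc; i++) printf("argv[%d]=%s\n", i, argv[i]);
      return 0;
    }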

Before (lines 903-917):
    if( blob_size(&content)!=size ){
      fossil_print("size mismatch on artifact %d: wanted %d but got %d\n",
                     rid, size, blob_size(&content));
      nErr++;
    }
    sha1sum_blob(&content, &cksum);
    if( fossil_strcmp(blob_str(&cksum), zUuid)!=0 ){
      fossil_print("checksum mismatch on artifact %d: wanted %s but got %s\n",
                   rid, zUuid, blob_str(&cksum));
      nErr++;
    }
    if( bParse && looks_like_control_artifact(&content) ){
      Blob err;
      int i, n;
      char *z;






After (lines 903-917):
    if( blob_size(&content)!=size ){
      fossil_print("size mismatch on artifact %d: wanted %d but got %d\n",
                     rid, size, blob_size(&content));
      nErr++;
    }
    sha1sum_blob(&content, &cksum);
    if( fossil_strcmp(blob_str(&cksum), zUuid)!=0 ){
      fossil_print("wrong hash on artifact %d: wanted %s but got %s\n",
                   rid, zUuid, blob_str(&cksum));
      nErr++;
    }
    if( bParse && looks_like_control_artifact(&content) ){
      Blob err;
      int i, n;
      char *z;

Before (lines 951-964):
        "control", "wiki", "ticket", "attachment", "event" };
    int i;
    fossil_print("%d total control artifacts\n", nCA);
    for(i=1; i<count(azType); i++){
      if( anCA[i] ) fossil_print("  %d %ss\n", anCA[i], azType[i]);
    }
  }


}

/*
** COMMAND: test-orphans
**
** Search the repository for orphaned artifacts
*/






After (lines 951-966):
        "control", "wiki", "ticket", "attachment", "event" };
    int i;
    fossil_print("%d total control artifacts\n", nCA);
    for(i=1; i<count(azType); i++){
      if( anCA[i] ) fossil_print("  %d %ss\n", anCA[i], azType[i]);
    }
  }
  fossil_print("low-level database integrity-check: ");
  fossil_print("%s\n", db_text(0, "PRAGMA integrity_check(10)"));
}

/*
** COMMAND: test-orphans
**
** Search the repository for orphaned artifacts
*/

Before (lines 1122-1128):
  }
  db_finalize(&q);
  if( nErr>0 || quietFlag==0 ){
    fossil_print("%d missing or shunned references in %d control artifacts\n",
                 nErr, nArtifact);
  }
}

After (lines 1124-1177):
  }
  db_finalize(&q);
  if( nErr>0 || quietFlag==0 ){
    fossil_print("%d missing or shunned references in %d control artifacts\n",
                 nErr, nArtifact);
  }
}

/*
** COMMAND: test-content-erase
**
** Usage: %fossil test-content-erase RID ....
**
** Remove all traces of one or more artifacts from the local repository.
**
** WARNING: This command destroys data and can cause you to lose work.
** Make sure you have a backup copy before using this command!
**
** WARNING: You must run "fossil rebuild" after this command to rebuild
** the metadata.
**
** Note that the arguments are the integer raw RID values from the BLOB table,
** not SHA1 hashs or labels.
*/
void test_content_erase(void){
  int i;
  Blob x;
  char c;
  Stmt q;
  prompt_user("This command erases information from the repository and\n"
              "might irrecoverably damage the repository.  Make sure you\n"
              "have a backup copy!\n"
              "Continue? (y/N)? ", &x);
  c = blob_str(&x)[0];
  blob_reset(&x);
  if( c!='y' && c!='Y' ) return;
  db_find_and_open_repository(OPEN_ANY_SCHEMA, 0);
  db_begin_transaction();
  db_prepare(&q, "SELECT rid FROM delta WHERE srcid=:rid");
  for(i=2; i<g.argc; i++){
    int rid = atoi(g.argv[i]);
    fossil_print("Erasing artifact %d (%s)\n", 
                 rid, db_text("", "SELECT uuid FROM blob WHERE rid=%d", rid));
    db_bind_int(&q, ":rid", rid);
    while( db_step(&q)==SQLITE_ROW ){
      content_undelta(db_column_int(&q,0));
    }
    db_reset(&q);
    db_multi_exec("DELETE FROM blob WHERE rid=%d", rid);
    db_multi_exec("DELETE FROM delta WHERE rid=%d", rid);
  }
  db_finalize(&q);
  db_end_transaction(0);
}
Changes to src/cson_amalgamation.c.

Before (lines 1476-1490):
#define cson_value_api_empty_m {           \
        CSON_TYPE_UNDEF/*typeID*/,         \
        NULL/*cleanup*/\
      }
/**
   Empty-initialized cson_value_api object.
*/
static const cson_value_api cson_value_api_empty = cson_value_api_empty_m;


typedef unsigned int cson_counter_t;
struct cson_value
{
    /** The "vtbl" of type-specific operations. All instances
        of a given logical value type share a single api instance.






After (lines 1476-1490):
#define cson_value_api_empty_m {           \
        CSON_TYPE_UNDEF/*typeID*/,         \
        NULL/*cleanup*/\
      }
/**
   Empty-initialized cson_value_api object.
*/
/*static const cson_value_api cson_value_api_empty = cson_value_api_empty_m;*/


typedef unsigned int cson_counter_t;
struct cson_value
{
    /** The "vtbl" of type-specific operations. All instances
        of a given logical value type share a single api instance.

Before (lines 1527-1545):
    cson_counter_t refcount;
};


/**
   Empty-initialized cson_value object.
*/
#define cson_value_empty_m { &cson_value_api_empty/*api*/, NULL/*value*/, 0/*refcount*/ }
/**
   Empty-initialized cson_value object.
*/
static const cson_value cson_value_empty = cson_value_empty_m;
const cson_parse_opt cson_parse_opt_empty = cson_parse_opt_empty_m;
const cson_output_opt cson_output_opt_empty = cson_output_opt_empty_m;
const cson_object_iterator cson_object_iterator_empty = cson_object_iterator_empty_m;
const cson_buffer cson_buffer_empty = cson_buffer_empty_m;
const cson_parse_info cson_parse_info_empty = cson_parse_info_empty_m;

static void cson_value_destroy_zero_it( cson_value * self );






After (lines 1527-1540):
    cson_counter_t refcount;
};


/**
   Empty-initialized cson_value object.
*/





const cson_parse_opt cson_parse_opt_empty = cson_parse_opt_empty_m;
const cson_output_opt cson_output_opt_empty = cson_output_opt_empty_m;
const cson_object_iterator cson_object_iterator_empty = cson_object_iterator_empty_m;
const cson_buffer cson_buffer_empty = cson_buffer_empty_m;
const cson_parse_info cson_parse_info_empty = cson_parse_info_empty_m;

static void cson_value_destroy_zero_it( cson_value * self );

Before (lines 1556-1571):
static const cson_value_api cson_value_api_integer = { CSON_TYPE_INTEGER, cson_value_destroy_zero_it };
static const cson_value_api cson_value_api_double = { CSON_TYPE_DOUBLE, cson_value_destroy_zero_it };
static const cson_value_api cson_value_api_string = { CSON_TYPE_STRING, cson_value_destroy_zero_it };
static const cson_value_api cson_value_api_array = { CSON_TYPE_ARRAY, cson_value_destroy_array };
static const cson_value_api cson_value_api_object = { CSON_TYPE_OBJECT, cson_value_destroy_object };

static const cson_value cson_value_undef = { &cson_value_api_undef, NULL, 0 };
static const cson_value cson_value_null_empty = { &cson_value_api_null, NULL, 0 };
static const cson_value cson_value_bool_empty = { &cson_value_api_bool, NULL, 0 };
static const cson_value cson_value_integer_empty = { &cson_value_api_integer, NULL, 0 };
static const cson_value cson_value_double_empty = { &cson_value_api_double, NULL, 0 };
static const cson_value cson_value_string_empty = { &cson_value_api_string, NULL, 0 };
static const cson_value cson_value_array_empty = { &cson_value_api_array, NULL, 0 };
static const cson_value cson_value_object_empty = { &cson_value_api_object, NULL, 0 };

/**






After (lines 1551-1564):
static const cson_value_api cson_value_api_integer = { CSON_TYPE_INTEGER, cson_value_destroy_zero_it };
static const cson_value_api cson_value_api_double = { CSON_TYPE_DOUBLE, cson_value_destroy_zero_it };
static const cson_value_api cson_value_api_string = { CSON_TYPE_STRING, cson_value_destroy_zero_it };
static const cson_value_api cson_value_api_array = { CSON_TYPE_ARRAY, cson_value_destroy_array };
static const cson_value_api cson_value_api_object = { CSON_TYPE_OBJECT, cson_value_destroy_object };

static const cson_value cson_value_undef = { &cson_value_api_undef, NULL, 0 };


static const cson_value cson_value_integer_empty = { &cson_value_api_integer, NULL, 0 };
static const cson_value cson_value_double_empty = { &cson_value_api_double, NULL, 0 };
static const cson_value cson_value_string_empty = { &cson_value_api_string, NULL, 0 };
static const cson_value cson_value_array_empty = { &cson_value_api_array, NULL, 0 };
static const cson_value cson_value_object_empty = { &cson_value_api_object, NULL, 0 };

/**

Before (lines 2116-2130):
{
    cson_kvp ** list;
    unsigned int count;
    unsigned int alloced;
};
typedef struct cson_kvp_list cson_kvp_list;
#define cson_kvp_list_empty_m {NULL/*list*/,0/*count*/,0/*alloced*/}
static const cson_kvp_list cson_kvp_list_empty = cson_kvp_list_empty_m;

struct cson_object
{
    cson_kvp_list kvp;
};
/*typedef struct cson_object cson_object;*/
#define cson_object_empty_m { cson_kvp_list_empty_m/*kvp*/ }






After (lines 2109-2123):
{
    cson_kvp ** list;
    unsigned int count;
    unsigned int alloced;
};
typedef struct cson_kvp_list cson_kvp_list;
#define cson_kvp_list_empty_m {NULL/*list*/,0/*count*/,0/*alloced*/}
/*static const cson_kvp_list cson_kvp_list_empty = cson_kvp_list_empty_m;*/

struct cson_object
{
    cson_kvp_list kvp;
};
/*typedef struct cson_object cson_object;*/
#define cson_object_empty_m { cson_kvp_list_empty_m/*kvp*/ }

Before (lines 2307-2321):
{
    return ( !v || !v->api || (v->api==&cson_value_api_undef))
        ? 1 : 0;
}
#define ISA(T,TID) char cson_value_is_##T( cson_value const * v ) {       \
        /*return (v && v->api) ? cson_value_is_a(v,CSON_TYPE_##TID) : 0;*/ \
        return (v && (v->api == &cson_value_api_##T)) ? 1 : 0; \
    } static const char bogusPlaceHolderForEmacsIndention##TID = CSON_TYPE_##TID
ISA(null,NULL);
ISA(bool,BOOL);
ISA(integer,INTEGER);
ISA(double,DOUBLE);
ISA(string,STRING);
ISA(array,ARRAY);
ISA(object,OBJECT);






After (lines 2300-2314):
{
    return ( !v || !v->api || (v->api==&cson_value_api_undef))
        ? 1 : 0;
}
#define ISA(T,TID) char cson_value_is_##T( cson_value const * v ) {       \
        /*return (v && v->api) ? cson_value_is_a(v,CSON_TYPE_##TID) : 0;*/ \
        return (v && (v->api == &cson_value_api_##T)) ? 1 : 0; \
    } extern char bogusPlaceHolderForEmacsIndention##TID
ISA(null,NULL);
ISA(bool,BOOL);
ISA(integer,INTEGER);
ISA(double,DOUBLE);
ISA(string,STRING);
ISA(array,ARRAY);
ISA(object,OBJECT);
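
For readers following the ISA macro change above: the function body is untouched; only the trailing declaration that absorbs the semicolon written after each ISA(...) invocation changes, from an unused static const char definition to a plain extern declaration.  As an illustration (this expansion is shown for clarity and is not additional code in the check-in), ISA(null,NULL) now expands to roughly:

    /* Approximate expansion of ISA(null,NULL) with the revised macro. */
    char cson_value_is_null( cson_value const * v ) {
        return (v && (v->api == &cson_value_api_null)) ? 1 : 0;
    } extern char bogusPlaceHolderForEmacsIndentionNULL;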

Before (lines 3885-3899):
                    rc = sprintf(ubuf, "\\u%04x",ch);
                    if( rc != 6 )
                    {
                        rc = cson_rc.RangeError;
                        break;
                    }
                    rc = f( state, ubuf, 6 );
                }else{ /* encode as a UTF16 surrugate pair */
                    /* http://unicodebook.readthedocs.org/en/latest/unicode_encodings.html#surrogates */
                    ch -= 0x10000;
                    rc = sprintf(ubuf, "\\u%04x\\u%04x",
                                 (0xd800 | (ch>>10)),
                                 (0xdc00 | (ch & 0x3ff)));
                    if( rc != 12 )
                    {






After (lines 3878-3892):
                    rc = sprintf(ubuf, "\\u%04x",ch);
                    if( rc != 6 )
                    {
                        rc = cson_rc.RangeError;
                        break;
                    }
                    rc = f( state, ubuf, 6 );
                }else{ /* encode as a UTF16 surrogate pair */
                    /* http://unicodebook.readthedocs.org/en/latest/unicode_encodings.html#surrogates */
                    ch -= 0x10000;
                    rc = sprintf(ubuf, "\\u%04x\\u%04x",
                                 (0xd800 | (ch>>10)),
                                 (0xdc00 | (ch & 0x3ff)));
                    if( rc != 12 )
                    {
Changes to src/db.c.

Before (lines 789-816):
    }else if(0==rid){
      sqlite3_result_null(context);
    }else{
      sqlite3_result_int64(context, rid);
    }
  }
}

/*
** Register the SQL functions that are useful both to the internal
** representation and to the "fossil sql" command.
*/
void db_add_aux_functions(sqlite3 *db){
  sqlite3_create_function(db, "checkin_mtime", 2, SQLITE_UTF8, 0,
                          db_checkin_mtime_function, 0, 0);
  sqlite3_create_function(db, "symbolic_name_to_rid", 1, SQLITE_UTF8, 0,
                          db_sym2rid_function, 0, 0);
  sqlite3_create_function(db, "symbolic_name_to_rid", 2, SQLITE_UTF8, 0,
                          db_sym2rid_function, 0, 0);
  sqlite3_create_function(db, "now", 0, SQLITE_UTF8, 0,
                                 db_now_function, 0, 0);




}


/*
** Open a database file.  Return a pointer to the new database
** connection.  An error results in process abort.
*/






After (lines 789-879):
    }else if(0==rid){
      sqlite3_result_null(context);
    }else{
      sqlite3_result_int64(context, rid);
    }
  }
}

/*
** The toLocal() SQL function returns a string that is an argument to a
** date/time function that is appropriate for modifying the time for display.
** If UTC time display is selected, no modification occurs.  If local time
** display is selected, the time is adjusted appropriately.
**
** Example usage:
**
**         SELECT datetime('now',toLocal());
*/
void db_tolocal_function(
  sqlite3_context *context,
  int argc,
  sqlite3_value **argv
){
  if( g.fTimeFormat==0 ){
    if( db_get_int("timeline-utc", 1) ){
      g.fTimeFormat = 1;
    }else{
      g.fTimeFormat = 2;
    }
  }
  if( g.fTimeFormat==1 ){
    sqlite3_result_text(context, "0 seconds", -1, SQLITE_STATIC);
  }else{
    sqlite3_result_text(context, "localtime", -1, SQLITE_STATIC);
  }
}

/*
** The fromLocal() SQL function returns a string that is an argument to a
** date/time function that is appropriate to convert an input time to UTC.
** If UTC time display is selected, no modification occurs.  If local time
** display is selected, the time is adjusted from local to UTC.
**
** Example usage:
**
**         SELECT julianday(:user_input,fromLocal());
*/
void db_fromlocal_function(
  sqlite3_context *context,
  int argc,
  sqlite3_value **argv
){
  if( g.fTimeFormat==0 ){
    if( db_get_int("timeline-utc", 1) ){
      g.fTimeFormat = 1;
    }else{
      g.fTimeFormat = 2;
    }
  }
  if( g.fTimeFormat==1 ){
    sqlite3_result_text(context, "0 seconds", -1, SQLITE_STATIC);
  }else{
    sqlite3_result_text(context, "utc", -1, SQLITE_STATIC);
  }
}


/*
** Register the SQL functions that are useful both to the internal
** representation and to the "fossil sql" command.
*/
void db_add_aux_functions(sqlite3 *db){
  sqlite3_create_function(db, "checkin_mtime", 2, SQLITE_UTF8, 0,
                          db_checkin_mtime_function, 0, 0);
  sqlite3_create_function(db, "symbolic_name_to_rid", 1, SQLITE_UTF8, 0,
                          db_sym2rid_function, 0, 0);
  sqlite3_create_function(db, "symbolic_name_to_rid", 2, SQLITE_UTF8, 0,
                          db_sym2rid_function, 0, 0);
  sqlite3_create_function(db, "now", 0, SQLITE_UTF8, 0,
                          db_now_function, 0, 0);
  sqlite3_create_function(db, "toLocal", 0, SQLITE_UTF8, 0,
                          db_tolocal_function, 0, 0);
  sqlite3_create_function(db, "fromLocal", 0, SQLITE_UTF8, 0,
                          db_fromlocal_function, 0, 0);
}


/*
** Open a database file.  Return a pointer to the new database
** connection.  An error results in process abort.
*/
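
The new toLocal() and fromLocal() SQL functions above let a query defer the UTC-versus-local decision to the timeline-utc setting, which is why earlier hunks in this check-in replace hard-coded 'localtime' and timeline_utc() usage with toLocal().  As a rough sketch only -- this fragment is not part of the check-in, and the query against the event table is merely a plausible example -- a Fossil-internal caller could use it like this:

    /* Illustration only: display times while honoring the timeline-utc
    ** setting, letting toLocal() pick '0 seconds' or 'localtime'. */
    Stmt q;
    db_prepare(&q,
      "SELECT datetime(event.mtime, toLocal()), coalesce(event.comment,'')"
      "  FROM event ORDER BY event.mtime DESC LIMIT 10"
    );
    while( db_step(&q)==SQLITE_ROW ){
      fossil_print("%s  %s\n", db_column_text(&q,0), db_column_text(&q,1));
    }
    db_finalize(&q);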

Before (lines 916-991):
** opened on a separate database connection g.dbConfig.  This prevents
** the ~/.fossil database from becoming locked on long check-in or sync
** operations which hold an exclusive transaction.  In a few cases, though,
** it is convenient for the ~/.fossil to be attached to the main database
** connection so that we can join between the various databases.  In that
** case, invoke this routine with useAttach as 1.
*/
void db_open_config(int useAttach){
  char *zDbName;
  char *zHome;
  if( g.zConfigDbName ){
    if( useAttach==g.useAttach ) return;
    db_close_config();
  }
  zHome = fossil_getenv("FOSSIL_HOME");
#if defined(_WIN32) || defined(__CYGWIN__)
  if( zHome==0 ){
    zHome = fossil_getenv("LOCALAPPDATA");
    if( zHome==0 ){
      zHome = fossil_getenv("APPDATA");
      if( zHome==0 ){
        char *zDrive = fossil_getenv("HOMEDRIVE");
        char *zPath = fossil_getenv("HOMEPATH");
        if( zDrive && zPath ) zHome = mprintf("%s%s", zDrive, zPath);
      }
    }
  }
  if( zHome==0 ){

    fossil_fatal("cannot locate home directory - please set the "
                 "FOSSIL_HOME, LOCALAPPDATA, APPDATA, or HOMEPATH "
                 "environment variables");
  }
#else
  if( zHome==0 ){
    zHome = fossil_getenv("HOME");
  }
  if( zHome==0 ){

    fossil_fatal("cannot locate home directory - please set the "
                 "FOSSIL_HOME or HOME environment variables");
  }
#endif
  if( file_isdir(zHome)!=1 ){

    fossil_fatal("invalid home directory: %s", zHome);
  }
#if defined(_WIN32) || defined(__CYGWIN__)
  /* . filenames give some window systems problems and many apps problems */
  zDbName = mprintf("%//_fossil", zHome);
#else
  zDbName = mprintf("%s/.fossil", zHome);
#endif
  if( file_size(zDbName)<1024*3 ){
    if( file_access(zHome, W_OK) ){

      fossil_fatal("home directory %s must be writeable", zHome);
    }
    db_init_database(zDbName, zConfigSchema, (char*)0);
  }
  if( file_access(zDbName, W_OK) ){

    fossil_fatal("configuration file %s must be writeable", zDbName);
  }
  if( useAttach ){
    db_open_or_attach(zDbName, "configdb", &g.useAttach);
    g.dbConfig = 0;
    g.zConfigDbType = 0;
  }else{
    g.useAttach = 0;
    g.dbConfig = db_open(zDbName);
    g.zConfigDbType = "configdb";
  }
  g.zConfigDbName = zDbName;

}

/*
** Return TRUE if zTable exists.
*/
int db_table_exists(
  const char *zDb,      /* One of: NULL, "configdb", "localdb", "repository" */






After (lines 979-1060):
** opened on a separate database connection g.dbConfig.  This prevents
** the ~/.fossil database from becoming locked on long check-in or sync
** operations which hold an exclusive transaction.  In a few cases, though,
** it is convenient for the ~/.fossil to be attached to the main database
** connection so that we can join between the various databases.  In that
** case, invoke this routine with useAttach as 1.
*/
int db_open_config(int useAttach, int isOptional){
  char *zDbName;
  char *zHome;
  if( g.zConfigDbName ){
    if( useAttach==g.useAttach ) return 1; /* Already open. */
    db_close_config();
  }
  zHome = fossil_getenv("FOSSIL_HOME");
#if defined(_WIN32) || defined(__CYGWIN__)
  if( zHome==0 ){
    zHome = fossil_getenv("LOCALAPPDATA");
    if( zHome==0 ){
      zHome = fossil_getenv("APPDATA");
      if( zHome==0 ){
        char *zDrive = fossil_getenv("HOMEDRIVE");
        char *zPath = fossil_getenv("HOMEPATH");
        if( zDrive && zPath ) zHome = mprintf("%s%s", zDrive, zPath);
      }
    }
  }
  if( zHome==0 ){
    if( isOptional ) return 0;
    fossil_fatal("cannot locate home directory - please set the "
                 "FOSSIL_HOME, LOCALAPPDATA, APPDATA, or HOMEPATH "
                 "environment variables");
  }
#else
  if( zHome==0 ){
    zHome = fossil_getenv("HOME");
  }
  if( zHome==0 ){
    if( isOptional ) return 0;
    fossil_fatal("cannot locate home directory - please set the "
                 "FOSSIL_HOME or HOME environment variables");
  }
#endif
  if( file_isdir(zHome)!=1 ){
    if( isOptional ) return 0;
    fossil_fatal("invalid home directory: %s", zHome);
  }
#if defined(_WIN32) || defined(__CYGWIN__)
  /* . filenames give some window systems problems and many apps problems */
  zDbName = mprintf("%//_fossil", zHome);
#else
  zDbName = mprintf("%s/.fossil", zHome);
#endif
  if( file_size(zDbName)<1024*3 ){
    if( file_access(zHome, W_OK) ){
      if( isOptional ) return 0;
      fossil_fatal("home directory %s must be writeable", zHome);
    }
    db_init_database(zDbName, zConfigSchema, (char*)0);
  }
  if( file_access(zDbName, W_OK) ){
    if( isOptional ) return 0;
    fossil_fatal("configuration file %s must be writeable", zDbName);
  }
  if( useAttach ){
    db_open_or_attach(zDbName, "configdb", &g.useAttach);
    g.dbConfig = 0;
    g.zConfigDbType = 0;
  }else{
    g.useAttach = 0;
    g.dbConfig = db_open(zDbName);
    g.zConfigDbType = "configdb";
  }
  g.zConfigDbName = zDbName;
  return 1;
}

/*
** Return TRUE if zTable exists.
*/
int db_table_exists(
  const char *zDb,      /* One of: NULL, "configdb", "localdb", "repository" */
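
db_open_config() now returns non-zero on success and, when its new isOptional argument is true, returns 0 instead of aborting via fossil_fatal() when no usable home directory or configuration database can be found.  A minimal hypothetical caller (illustration only, not code from the check-in) checks the return value much as the next hunk does:

    /* Hypothetical caller: treat the per-user configuration database as
    ** optional rather than fatal. */
    if( db_open_config(0, 1)==0 ){
      fossil_print("no per-user configuration found; using defaults\n");
    }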

Before (lines 1091-1114):
  if( g.localOpen ) return 1;
  file_getcwd(zPwd, sizeof(zPwd)-20);
  n = strlen(zPwd);
  while( n>0 ){
    for(i=0; i<count(aDbName); i++){
      sqlite3_snprintf(sizeof(zPwd)-n, &zPwd[n], "/%s", aDbName[i]);
      if( isValidLocalDb(zPwd) ){



        /* Found a valid checkout database file */
        g.zLocalDbName = mprintf("%s", zPwd);
        zPwd[n] = 0;
        while( n>0 && zPwd[n-1]=='/' ){
          n--;
          zPwd[n] = 0;
        }
        g.zLocalRoot = mprintf("%s/", zPwd);
        g.localOpen = 1;
        db_open_config(0);
        db_open_repository(zDbName);
        return 1;
      }
    }
    n--;
    while( n>1 && zPwd[n]!='/' ){ n--; }
    while( n>1 && zPwd[n-1]=='/' ){ n--; }






After (lines 1160-1185):
  if( g.localOpen ) return 1;
  file_getcwd(zPwd, sizeof(zPwd)-20);
  n = strlen(zPwd);
  while( n>0 ){
    for(i=0; i<count(aDbName); i++){
      sqlite3_snprintf(sizeof(zPwd)-n, &zPwd[n], "/%s", aDbName[i]);
      if( isValidLocalDb(zPwd) ){
        if( db_open_config(0, 1)==0 ){
          return 0; /* Configuration could not be opened */
        }
        /* Found a valid checkout database file */
        g.zLocalDbName = mprintf("%s", zPwd);
        zPwd[n] = 0;
        while( n>0 && zPwd[n-1]=='/' ){
          n--;
          zPwd[n] = 0;
        }
        g.zLocalRoot = mprintf("%s/", zPwd);
        g.localOpen = 1;

        db_open_repository(zDbName);
        return 1;
      }
    }
    n--;
    while( n>1 && zPwd[n]!='/' ){ n--; }
    while( n>1 && zPwd[n-1]=='/' ){ n--; }

Before (lines 1132-1145):
    zRepo = db_lget("repository", 0);
    if( zRepo && !file_is_absolute_path(zRepo) ){
      zRepo = mprintf("%s%s", g.zLocalRoot, zRepo);
    }
  }
  return zRepo;
}


/*
** Open the repository database given by zDbName.  If zDbName==NULL then
** get the name from the already open local database.
*/
void db_open_repository(const char *zDbName){
  if( g.repositoryOpen ) return;






After (lines 1203-1228):
    zRepo = db_lget("repository", 0);
    if( zRepo && !file_is_absolute_path(zRepo) ){
      zRepo = mprintf("%s%s", g.zLocalRoot, zRepo);
    }
  }
  return zRepo;
}

/*
** Returns non-zero if the default value for the "allow-symlinks" setting
** is "on".
*/
int db_allow_symlinks_by_default(void){
#if defined(_WIN32)
  return 0;
#else
  return 1;
#endif
}

/*
** Open the repository database given by zDbName.  If zDbName==NULL then
** get the name from the already open local database.
*/
void db_open_repository(const char *zDbName){
  if( g.repositoryOpen ) return;

Before (lines 1170-1184):
      fossil_panic("not a valid repository: %s", zDbName);
    }
  }
  g.zRepositoryName = mprintf("%s", zDbName);
  db_open_or_attach(g.zRepositoryName, "repository", 0);
  g.repositoryOpen = 1;
  /* Cache "allow-symlinks" option, because we'll need it on every stat call */
  g.allowSymlinks = db_get_boolean("allow-symlinks", 0);

  g.zAuxSchema = db_get("aux-schema","");

  /* Verify that the PLINK table has a new column added by the
  ** 2014-11-28 schema change.  Create it if necessary.  This code
  ** can be removed in the future, once all users have upgraded to the
  ** 2014-11-28 or later schema.
  */






After (lines 1253-1268):
      fossil_panic("not a valid repository: %s", zDbName);
    }
  }
  g.zRepositoryName = mprintf("%s", zDbName);
  db_open_or_attach(g.zRepositoryName, "repository", 0);
  g.repositoryOpen = 1;
  /* Cache "allow-symlinks" option, because we'll need it on every stat call */
  g.allowSymlinks = db_get_boolean("allow-symlinks",
                                   db_allow_symlinks_by_default());
  g.zAuxSchema = db_get("aux-schema","");

  /* Verify that the PLINK table has a new column added by the
  ** 2014-11-28 schema change.  Create it if necessary.  This code
  ** can be removed in the future, once all users have upgraded to the
  ** 2014-11-28 or later schema.
  */

Before (lines 1652-1666):
  if( -1 != file_size(g.argv[2]) ){
    fossil_fatal("file already exists: %s", g.argv[2]);
  }

  db_create_repository(g.argv[2]);
  db_open_repository(g.argv[2]);
  db_open_config(0);
  if( zTemplate ) db_attach(zTemplate, "settingSrc");
  db_begin_transaction();
  if( zDate==0 ) zDate = "now";
  db_initial_setup(zTemplate, zDate, zDefaultUser);
  db_end_transaction(0);
  if( zTemplate ) db_detach("settingSrc");
  fossil_print("project-id: %s\n", db_get("project-code", 0));
  if( -1 != file_size(g.argv[2]) ){
    fossil_fatal("file already exists: %s", g.argv[2]);
  }

  db_create_repository(g.argv[2]);
  db_open_repository(g.argv[2]);
  db_open_config(0, 0);
  if( zTemplate ) db_attach(zTemplate, "settingSrc");
  db_begin_transaction();
  if( zDate==0 ) zDate = "now";
  db_initial_setup(zTemplate, zDate, zDefaultUser);
  db_end_transaction(0);
  if( zTemplate ) db_detach("settingSrc");
  fossil_print("project-id: %s\n", db_get("project-code", 0));
** repository and local databases.
**
** If no such variable exists, return zDefault.  Or, if zName is the name
** of a setting, then the zDefault is ignored and the default value of the
** setting is returned instead.  If zName is a versioned setting, then
** the versioned value takes priority.
*/
char *db_get(const char *zName, char *zDefault){
  char *z = 0;
  const Setting *pSetting = db_find_setting(zName, 0);
  if( g.repositoryOpen ){
    z = db_text(0, "SELECT value FROM config WHERE name=%Q", zName);
  }
  if( z==0 && g.zConfigDbName ){
    db_swap_connections();
    z = db_text(0, "SELECT value FROM global_config WHERE name=%Q", zName);
    db_swap_connections();
  }
  if( pSetting!=0 && pSetting->versionable ){
    /* This is a versionable setting, try and get the info from a
    ** checked out file */
    z = db_get_versioned(zName, z);
  }
  if( z==0 ){
    if( zDefault==0 && pSetting && pSetting->def[0] ){
      z = fossil_strdup(pSetting->def);
    }else{
      z = zDefault;
    }
  }
  return z;
}
char *db_get_mtime(const char *zName, char *zFormat, char *zDefault){
  char *z = 0;
  if( g.repositoryOpen ){
    z = db_text(0, "SELECT mtime FROM config WHERE name=%Q", zName);
  }
  if( z==0 ){
    z = zDefault;
  }else if( zFormat!=0 ){
    z = db_text(0, "SELECT strftime(%Q,%Q,'unixepoch');", zFormat, z);
  }
  return z;
}
void db_set(const char *zName, const char *zValue, int globalFlag){
  db_begin_transaction();
** repository and local databases.
**
** If no such variable exists, return zDefault.  Or, if zName is the name
** of a setting, then the zDefault is ignored and the default value of the
** setting is returned instead.  If zName is a versioned setting, then
** the versioned value takes priority.
*/
char *db_get(const char *zName, const char *zDefault){
  char *z = 0;
  const Setting *pSetting = db_find_setting(zName, 0);
  if( g.repositoryOpen ){
    z = db_text(0, "SELECT value FROM config WHERE name=%Q", zName);
  }
  if( z==0 && g.zConfigDbName ){
    db_swap_connections();
    z = db_text(0, "SELECT value FROM global_config WHERE name=%Q", zName);
    db_swap_connections();
  }
  if( pSetting!=0 && pSetting->versionable ){
    /* This is a versionable setting, try and get the info from a
    ** checked out file */
    z = db_get_versioned(zName, z);
  }
  if( z==0 ){
    if( zDefault==0 && pSetting && pSetting->def[0] ){
      z = fossil_strdup(pSetting->def);
    }else{
      z = fossil_strdup(zDefault);
    }
  }
  return z;
}
char *db_get_mtime(const char *zName, const char *zFormat, const char *zDefault){
  char *z = 0;
  if( g.repositoryOpen ){
    z = db_text(0, "SELECT mtime FROM config WHERE name=%Q", zName);
  }
  if( z==0 ){
    z = fossil_strdup(zDefault);
  }else if( zFormat!=0 ){
    z = db_text(0, "SELECT strftime(%Q,%Q,'unixepoch');", zFormat, z);
  }
  return z;
}
void db_set(const char *zName, const char *zValue, int globalFlag){
  db_begin_transaction();
}
int db_get_boolean(const char *zName, int dflt){
  char *zVal = db_get(zName, dflt ? "on" : "off");
  if( is_truth(zVal) ) return 1;
  if( is_false(zVal) ) return 0;
  return dflt;
}







char *db_lget(const char *zName, char *zDefault){
  return db_text((char*)zDefault,
                 "SELECT value FROM vvar WHERE name=%Q", zName);
}
void db_lset(const char *zName, const char *zValue){
  db_multi_exec("REPLACE INTO vvar(name,value) VALUES(%Q,%Q)", zName, zValue);
}
int db_lget_int(const char *zName, int dflt){
  return db_int(dflt, "SELECT value FROM vvar WHERE name=%Q", zName);
}
int db_get_boolean(const char *zName, int dflt){
  char *zVal = db_get(zName, dflt ? "on" : "off");
  if( is_truth(zVal) ) return 1;
  if( is_false(zVal) ) return 0;
  return dflt;
}
int db_get_versioned_boolean(const char *zName, int dflt){
  char *zVal = db_get_versioned(zName, 0);
  if( zVal==0 ) return dflt;
  if( is_truth(zVal) ) return 1;
  if( is_false(zVal) ) return 0;
  return dflt;
}
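
db_get_boolean() and the new db_get_versioned_boolean() above both reduce a stored text value to 0 or 1 and keep the caller's default when the text is missing or unrecognized. The following is a minimal standalone sketch of that fallback shape; the is_truth()/is_false() stand-ins are simplified guesses, since the real helpers live elsewhere in the Fossil tree and may accept more spellings.

#include <assert.h>
#include <string.h>

/* Simplified stand-ins for Fossil's is_truth()/is_false(); the real
** helpers may accept additional spellings. */
static int is_truth(const char *z){
  return strcmp(z,"on")==0 || strcmp(z,"yes")==0
      || strcmp(z,"true")==0 || strcmp(z,"1")==0;
}
static int is_false(const char *z){
  return strcmp(z,"off")==0 || strcmp(z,"no")==0
      || strcmp(z,"false")==0 || strcmp(z,"0")==0;
}

/* Same fallback shape as db_get_boolean()/db_get_versioned_boolean():
** text that is neither clearly true nor clearly false keeps the default. */
static int boolean_from_text(const char *zVal, int dflt){
  if( zVal==0 ) return dflt;
  if( is_truth(zVal) ) return 1;
  if( is_false(zVal) ) return 0;
  return dflt;
}

int main(void){
  assert( boolean_from_text("on", 0)==1 );
  assert( boolean_from_text("off", 1)==0 );
  assert( boolean_from_text("maybe", 1)==1 );  /* unparsable keeps default */
  assert( boolean_from_text(0, 0)==0 );        /* missing keeps default */
  return 0;
}
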
char *db_lget(const char *zName, const char *zDefault){
  return db_text(zDefault,
                 "SELECT value FROM vvar WHERE name=%Q", zName);
}
void db_lset(const char *zName, const char *zValue){
  db_multi_exec("REPLACE INTO vvar(name,value) VALUES(%Q,%Q)", zName, zValue);
}
int db_lget_int(const char *zName, int dflt){
  return db_int(dflt, "SELECT value FROM vvar WHERE name=%Q", zName);
** See also: close
*/
void cmd_open(void){
  int emptyFlag;
  int keepFlag;
  int forceMissingFlag;
  int allowNested;

  static char *azNewArgv[] = { 0, "checkout", "--prompt", 0, 0, 0, 0 };

  url_proxy_options();
  emptyFlag = find_option("empty",0,0)!=0;
  keepFlag = find_option("keep",0,0)!=0;
  forceMissingFlag = find_option("force-missing",0,0)!=0;
  allowNested = find_option("nested",0,0)!=0;
** See also: close
*/
void cmd_open(void){
  int emptyFlag;
  int keepFlag;
  int forceMissingFlag;
  int allowNested;
  int allowSymlinks;
  static char *azNewArgv[] = { 0, "checkout", "--prompt", 0, 0, 0, 0 };

  url_proxy_options();
  emptyFlag = find_option("empty",0,0)!=0;
  keepFlag = find_option("keep",0,0)!=0;
  forceMissingFlag = find_option("force-missing",0,0)!=0;
  allowNested = find_option("nested",0,0)!=0;
    }else if( db_exists("SELECT 1 FROM event WHERE type='ci'") ){
      g.zOpenRevision = db_get("main-branch", "trunk");
    }
  }

  if( g.zOpenRevision ){
    /* Since the repository is open and we know the revision now,
    ** refresh the allow-symlinks flag. */






    g.allowSymlinks = db_get_boolean("allow-symlinks", 0);


  }

#if defined(_WIN32) || defined(__CYGWIN__)
# define LOCALDB_NAME "./_FOSSIL_"
#else
# define LOCALDB_NAME "./.fslckout"
#endif
  db_init_database(LOCALDB_NAME, zLocalSchema,
#ifdef FOSSIL_LOCAL_WAL
                   "COMMIT; PRAGMA journal_mode=WAL; BEGIN;",
#endif
                   (char*)0);
  db_delete_on_failure(LOCALDB_NAME);
  db_open_local(0);
  db_lset("repository", g.argv[2]);
  db_record_repository_filename(g.argv[2]);
  db_lset_int("checkout", 0);
  azNewArgv[0] = g.argv[0];
  g.argv = azNewArgv;
  if( !emptyFlag ){
    g.argc = 3;
    }else if( db_exists("SELECT 1 FROM event WHERE type='ci'") ){
      g.zOpenRevision = db_get("main-branch", "trunk");
    }
  }

  if( g.zOpenRevision ){
    /* Since the repository is open and we know the revision now,
    ** refresh the allow-symlinks flag.  Since neither the local
    ** checkout nor the configuration database are open at this
    ** point, this should always return the versioned setting,
    ** if any, or the default value, which is negative one.  The
    ** value negative one, in this context, means that the code
    ** below should fallback to using the setting value from the
    ** repository or global configuration databases only. */
    allowSymlinks = db_get_versioned_boolean("allow-symlinks", -1);
  }else{
    allowSymlinks = -1; /* Use non-versioned settings only. */
  }

#if defined(_WIN32) || defined(__CYGWIN__)
# define LOCALDB_NAME "./_FOSSIL_"
#else
# define LOCALDB_NAME "./.fslckout"
#endif
  db_init_database(LOCALDB_NAME, zLocalSchema,
#ifdef FOSSIL_LOCAL_WAL
                   "COMMIT; PRAGMA journal_mode=WAL; BEGIN;",
#endif
                   (char*)0);
  db_delete_on_failure(LOCALDB_NAME);
  db_open_local(0);
  if( allowSymlinks>=0 ){
    /* Use the value from the versioned setting, which was read
    ** prior to opening the local checkout (i.e. which is most
    ** likely empty and does not actually contain any versioned
    ** setting files yet).  Normally, this value would be given
    ** first priority within db_get_boolean(); however, this is
    ** a special case because we know the on-disk files may not
    ** exist yet. */
    g.allowSymlinks = allowSymlinks;
  }else{
    /* Since the local checkout may not have any files at this
    ** point, this will probably be the setting value from the
    ** repository or global configuration databases. */
    g.allowSymlinks = db_get_boolean("allow-symlinks",
                                     db_allow_symlinks_by_default());
  }
  db_lset("repository", g.argv[2]);
  db_record_repository_filename(g.argv[2]);
  db_lset_int("checkout", 0);
  azNewArgv[0] = g.argv[0];
  g.argv = azNewArgv;
  if( !emptyFlag ){
    g.argc = 3;
  const char *def;      /* Default value */
};
#endif /* INTERFACE */

const Setting aSetting[] = {
  { "access-log",       0,              0, 0, 0, "off"                 },
  { "admin-log",        0,              0, 0, 0, "off"                 },

  { "allow-symlinks",   0,              0, 1, 0, "off"                 },



  { "auto-captcha",     "autocaptcha",  0, 0, 0, "on"                  },
  { "auto-hyperlink",   0,              0, 0, 0, "on",                 },
  { "auto-shun",        0,              0, 0, 0, "on"                  },
  { "autosync",         0,              0, 0, 0, "on"                  },
  { "autosync-tries",   0,             16, 0, 0, "1"                   },
  { "binary-glob",      0,             40, 1, 0, ""                    },
#if defined(_WIN32) || defined(__CYGWIN__) || defined(__DARWIN__) || \
  const char *def;      /* Default value */
};
#endif /* INTERFACE */

const Setting aSetting[] = {
  { "access-log",       0,              0, 0, 0, "off"                 },
  { "admin-log",        0,              0, 0, 0, "off"                 },
#if defined(_WIN32)
  { "allow-symlinks",   0,              0, 1, 0, "off"                 },
#else
  { "allow-symlinks",   0,              0, 1, 0, "on"                  },
#endif
  { "auto-captcha",     "autocaptcha",  0, 0, 0, "on"                  },
  { "auto-hyperlink",   0,              0, 0, 0, "on",                 },
  { "auto-shun",        0,              0, 0, 0, "on"                  },
  { "autosync",         0,              0, 0, 0, "on"                  },
  { "autosync-tries",   0,             16, 0, 0, "1"                   },
  { "binary-glob",      0,             40, 1, 0, ""                    },
#if defined(_WIN32) || defined(__CYGWIN__) || defined(__DARWIN__) || \
  { "diff-binary",      0,              0, 0, 0, "on"                  },
  { "diff-command",     0,             40, 0, 0, ""                    },
  { "dont-push",        0,              0, 0, 0, "off"                 },
  { "dotfiles",         0,              0, 1, 0, "off"                 },
  { "editor",           0,             32, 0, 0, ""                    },
  { "empty-dirs",       0,             40, 1, 0, ""                    },
  { "encoding-glob",    0,             40, 1, 0, ""                    },





  { "gdiff-command",    0,             40, 0, 0, "gdiff"               },
  { "gmerge-command",   0,             40, 0, 0, ""                    },
  { "hash-digits",      0,              5, 0, 0, "10"                  },
  { "http-port",        0,             16, 0, 0, "8080"                },
  { "https-login",      0,              0, 0, 0, "off"                 },
  { "ignore-glob",      0,             40, 1, 0, ""                    },
  { "keep-glob",        0,             40, 1, 0, ""                    },
  { "diff-binary",      0,              0, 0, 0, "on"                  },
  { "diff-command",     0,             40, 0, 0, ""                    },
  { "dont-push",        0,              0, 0, 0, "off"                 },
  { "dotfiles",         0,              0, 1, 0, "off"                 },
  { "editor",           0,             32, 0, 0, ""                    },
  { "empty-dirs",       0,             40, 1, 0, ""                    },
  { "encoding-glob",    0,             40, 1, 0, ""                    },
#if defined(FOSSIL_ENABLE_EXEC_REL_PATHS)
  { "exec-rel-paths",   0,              0, 0, 0, "on"                  },
#else
  { "exec-rel-paths",   0,              0, 0, 0, "off"                 },
#endif
  { "gdiff-command",    0,             40, 0, 0, "gdiff"               },
  { "gmerge-command",   0,             40, 0, 0, ""                    },
  { "hash-digits",      0,              5, 0, 0, "10"                  },
  { "http-port",        0,             16, 0, 0, "8080"                },
  { "https-login",      0,              0, 0, 0, "off"                 },
  { "ignore-glob",      0,             40, 1, 0, ""                    },
  { "keep-glob",        0,             40, 1, 0, ""                    },
**                     created.
**
**    encoding-glob    The VALUE is a comma or newline-separated list of GLOB
**     (versionable)   patterns specifying files that the "commit" command will
**                     ignore when issuing warnings about text files that may
**                     use another encoding than ASCII or UTF-8. Set to "*"
**                     to disable encoding checking.



**
**    gdiff-command    External command to run when performing a graphical
**                     diff. If undefined, text diff will be used.
**
**    gmerge-command   A graphical merge conflict resolver command operating
**                     on four files.
**                     Ex: kdiff3 "%baseline" "%original" "%merge" -o "%output"
**                     created.
**
**    encoding-glob    The VALUE is a comma or newline-separated list of GLOB
**     (versionable)   patterns specifying files that the "commit" command will
**                     ignore when issuing warnings about text files that may
**                     use another encoding than ASCII or UTF-8. Set to "*"
**                     to disable encoding checking.
**
**    exec-rel-paths   When executing certain external commands (e.g. diff and
**                     gdiff), use relative paths.
**
**    gdiff-command    External command to run when performing a graphical
**                     diff. If undefined, text diff will be used.
**
**    gmerge-command   A graphical merge conflict resolver command operating
**                     on four files.
**                     Ex: kdiff3 "%baseline" "%original" "%merge" -o "%output"
**
** See also: configuration
*/
void setting_cmd(void){
  int i;
  int globalFlag = find_option("global","g",0)!=0;
  int unsetFlag = g.argv[1][0]=='u';
  db_open_config(1);
  if( !globalFlag ){
    db_find_and_open_repository(OPEN_ANY_SCHEMA | OPEN_OK_NOT_FOUND, 0);
  }
  if( !g.repositoryOpen ){
    globalFlag = 1;
  }
  if( unsetFlag && g.argc!=3 ){
**
** See also: configuration
*/
void setting_cmd(void){
  int i;
  int globalFlag = find_option("global","g",0)!=0;
  int unsetFlag = g.argv[1][0]=='u';
  db_open_config(1, 0);
  if( !globalFlag ){
    db_find_and_open_repository(OPEN_ANY_SCHEMA | OPEN_OK_NOT_FOUND, 0);
  }
  if( !g.repositoryOpen ){
    globalFlag = 1;
  }
  if( unsetFlag && g.argc!=3 ){
Changes to src/delta.c.
};

/*
** Initialize the rolling hash using the first NHASH characters of z[]
*/
static void hash_init(hash *pHash, const char *z){
  u16 a, b, i;
  a = b = 0;
  for(i=0; i<NHASH; i++){
    a += z[i];
    b += (NHASH-i)*z[i];
    pHash->z[i] = z[i];
  }

  pHash->a = a & 0xffff;
  pHash->b = b & 0xffff;
  pHash->i = 0;
}

/*
** Advance the rolling hash by a single character "c"

};

/*
** Initialize the rolling hash using the first NHASH characters of z[]
*/
static void hash_init(hash *pHash, const char *z){
  u16 a, b, i;
  a = b = z[0];
  for(i=1; i<NHASH; i++){
    a += z[i];
    b += a;

  }
  memcpy(pHash->z, z, NHASH);
  pHash->a = a & 0xffff;
  pHash->b = b & 0xffff;
  pHash->i = 0;
}

/*
** Advance the rolling hash by a single character "c"
/*
** Return a 32-bit hash value
*/
static u32 hash_32bit(hash *pHash){
  return (pHash->a & 0xffff) | (((u32)(pHash->b & 0xffff))<<16);
}
/*
** Write a base-64 integer into the given buffer.
*/
static void putInt(unsigned int v, char **pz){
  static const char zDigits[] =
    "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz~";
/*
** Return a 32-bit hash value
*/
static u32 hash_32bit(hash *pHash){
  return (pHash->a & 0xffff) | (((u32)(pHash->b & 0xffff))<<16);
}

/*
** Compute a hash on NHASH bytes.
**
** This routine is intended to be equivalent to:
**    hash h;
**    hash_init(&h, zInput);
**    return hash_32bit(&h);
*/
static u32 hash_once(const char *z){
  u16 a, b, i;
  a = b = z[0];
  for(i=1; i<NHASH; i++){
    a += z[i];
    b += a;
  }
  return a | (((u32)b)<<16);
}
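
The rewritten hash_init() and the new hash_once() above compute the same (a,b) pair as before; the b += a recurrence just accumulates the old (NHASH-i) weighting incrementally. Below is a self-contained sketch of that equivalence, using uint16_t/uint32_t in place of Fossil's u16/u32 and assuming the usual NHASH block size of 16 (delta.c defines its own NHASH and hash struct; this copy is simplified).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NHASH 16   /* assumed block size; delta.c defines its own NHASH */

typedef struct { uint16_t a, b, i; char z[NHASH]; } hash;  /* simplified */

/* Same recurrence as the rewritten hash_init() above. */
static void hash_init(hash *pHash, const char *z){
  uint16_t a, b, i;
  a = b = z[0];
  for(i=1; i<NHASH; i++){
    a += z[i];
    b += a;
  }
  memcpy(pHash->z, z, NHASH);
  pHash->a = a; pHash->b = b; pHash->i = 0;
}
static uint32_t hash_32bit(hash *pHash){
  return (pHash->a & 0xffff) | (((uint32_t)(pHash->b & 0xffff))<<16);
}
/* One-shot variant, as introduced by this check-in. */
static uint32_t hash_once(const char *z){
  uint16_t a, b, i;
  a = b = z[0];
  for(i=1; i<NHASH; i++){
    a += z[i];
    b += a;
  }
  return a | (((uint32_t)b)<<16);
}

int main(void){
  const char *z = "0123456789abcdefXYZ";   /* at least NHASH bytes */
  hash h;
  hash_init(&h, z);
  assert( hash_once(z)==hash_32bit(&h) );
  printf("hash = 0x%08x\n", (unsigned)hash_once(z));
  return 0;
}

The assertion holds because after the loop b equals 16*z[0] + 15*z[1] + ... + 1*z[15] modulo 2^16, which is exactly what the old per-character weighting computed.
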

/*
** Write a base-64 integer into the given buffer.
*/
static void putInt(unsigned int v, char **pz){
  static const char zDigits[] =
    "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz~";
*/
static int digit_count(int v){
  unsigned int i, x;
  for(i=1, x=64; v>=x; i++, x <<= 6){}
  return i;
}







/*
** Compute a 32-bit checksum on the N-byte buffer.  Return the result.



*/
static unsigned int checksum(const char *zIn, size_t N){

  const unsigned char *z = (const unsigned char *)zIn;

  unsigned sum0 = 0;
  unsigned sum1 = 0;
  unsigned sum2 = 0;
  unsigned sum3 = 0;
  while(N >= 16){
    sum0 += ((unsigned)z[0] + z[4] + z[8] + z[12]);
    sum1 += ((unsigned)z[1] + z[5] + z[9] + z[13]);
    sum2 += ((unsigned)z[2] + z[6] + z[10]+ z[14]);
    sum3 += ((unsigned)z[3] + z[7] + z[11]+ z[15]);
    z += 16;
    N -= 16;
  }
  while(N >= 4){
    sum0 += z[0];
    sum1 += z[1];
    sum2 += z[2];
    sum3 += z[3];
    z += 4;
    N -= 4;
  }
  sum3 += (sum2 << 8) + (sum1 << 16) + (sum0 << 24);


  switch(N){
    case 3:   sum3 += (z[2] << 8);
    case 2:   sum3 += (z[1] << 16);
    case 1:   sum3 += (z[0] << 24);
    default:  ;
  }
  return sum3;
}

/*
** Create a new delta.
**
** The delta is written into a preallocated buffer, zDelta, which
** should be at least 60 bytes longer than the target file, zOut.
*/
static int digit_count(int v){
  unsigned int i, x;
  for(i=1, x=64; v>=x; i++, x <<= 6){}
  return i;
}

#ifdef __GNUC__
# define GCC_VERSION (__GNUC__*1000000+__GNUC_MINOR__*1000+__GNUC_PATCHLEVEL__)
#else
# define GCC_VERSION 0
#endif

/*
** Compute a 32-bit big-endian checksum on the N-byte buffer.  If the
** buffer is not a multiple of 4 bytes in length, compute the sum that would
** have occurred if the buffer was padded with zeros to the next multiple
** of four bytes.
*/
static unsigned int checksum(const char *zIn, size_t N){
  static const int byteOrderTest = 1;
  const unsigned char *z = (const unsigned char *)zIn;
  const unsigned char *zEnd = (const unsigned char*)&zIn[N&~3];
  unsigned sum = 0;
  assert( (z - (const unsigned char*)0)%4==0 );  /* Four-byte alignment */
  if( 0==*(char*)&byteOrderTest ){
    /* This is a big-endian machine */
    while( z<zEnd ){
      sum += *(unsigned*)z;
      z += 4;
    }
  }else{
    /* A little-endian machine */
#if GCC_VERSION>=4003000
    while( z<zEnd ){
      sum += __builtin_bswap32(*(unsigned*)z);
      z += 4;
    }
#elif defined(_MSC_VER) && _MSC_VER>=1300
    while( z<zEnd ){
      sum += _byteswap_ulong(*(unsigned*)z);
      z += 4;
    }
#else    
    unsigned sum0 = 0;
    unsigned sum1 = 0;
    unsigned sum2 = 0;
    while(N >= 16){
      sum0 += ((unsigned)z[0] + z[4] + z[8] + z[12]);
      sum1 += ((unsigned)z[1] + z[5] + z[9] + z[13]);
      sum2 += ((unsigned)z[2] + z[6] + z[10]+ z[14]);
      sum  += ((unsigned)z[3] + z[7] + z[11]+ z[15]);
      z += 16;
      N -= 16;
    }
    while(N >= 4){
      sum0 += z[0];
      sum1 += z[1];
      sum2 += z[2];
      sum  += z[3];
      z += 4;
      N -= 4;
    }
    sum += (sum2 << 8) + (sum1 << 16) + (sum0 << 24);
#endif
  }
  switch(N&3){
    case 3:   sum += (z[2] << 8);
    case 2:   sum += (z[1] << 16);
    case 1:   sum += (z[0] << 24);
    default:  ;
  }
  return sum;
}

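The rewritten checksum() above sums the buffer four bytes at a time and byte-swaps each word on little-endian hosts, which is the same big-endian sum the old byte-shifting loops produced. The following self-contained sketch demonstrates that equivalence; the function names are invented for the example, and memcpy() is used instead of the aligned pointer casts in delta.c.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Reference: big-endian byte-at-a-time sum with a zero-padded tail. */
static uint32_t sum_be_bytes(const unsigned char *z, size_t n){
  uint32_t sum = 0;
  size_t i;
  for(i=0; i+4<=n; i+=4){
    sum += ((uint32_t)z[i]<<24) | ((uint32_t)z[i+1]<<16)
         | ((uint32_t)z[i+2]<<8) | (uint32_t)z[i+3];
  }
  switch( n & 3 ){
    case 3: sum += (uint32_t)z[i+2]<<8;   /* fall through */
    case 2: sum += (uint32_t)z[i+1]<<16;  /* fall through */
    case 1: sum += (uint32_t)z[i]<<24;
  }
  return sum;
}

/* Word-at-a-time sum, byte-swapping on little-endian hosts. */
static uint32_t sum_be_words(const unsigned char *z, size_t n){
  static const int one = 1;              /* same probe as byteOrderTest */
  int littleEndian = *(const char*)&one;
  uint32_t sum = 0, w;
  size_t i;
  for(i=0; i+4<=n; i+=4){
    memcpy(&w, z+i, 4);                  /* no alignment assumption */
    if( littleEndian ){
      w = (w>>24) | ((w>>8)&0xff00) | ((w<<8)&0xff0000) | (w<<24);
    }
    sum += w;
  }
  switch( n & 3 ){
    case 3: sum += (uint32_t)z[i+2]<<8;   /* fall through */
    case 2: sum += (uint32_t)z[i+1]<<16;  /* fall through */
    case 1: sum += (uint32_t)z[i]<<24;
  }
  return sum;
}

int main(void){
  const unsigned char *t1 = (const unsigned char*)"abcdefghijk";           /* 11 bytes */
  const unsigned char *t2 = (const unsigned char*)"delta checksum test!";  /* 20 bytes */
  assert( sum_be_words(t1, 11)==sum_be_bytes(t1, 11) );
  assert( sum_be_words(t2, 20)==sum_be_bytes(t2, 20) );
  return 0;
}

The *(const char*)&one probe mirrors the byteOrderTest constant in the patch, and the switch on n&3 reproduces the zero-padded tail described in the new comment.
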
/*
** Create a new delta.
**
** The delta is written into a preallocated buffer, zDelta, which
** should be at least 60 bytes longer than the target file, zOut.
  */
  nHash = lenSrc/NHASH;
  collide = fossil_malloc( nHash*2*sizeof(int) );
  landmark = &collide[nHash];
  memset(landmark, -1, nHash*sizeof(int));
  memset(collide, -1, nHash*sizeof(int));
  for(i=0; i<lenSrc-NHASH; i+=NHASH){
    int hv;
    hash_init(&h, &zSrc[i]);
    hv = hash_32bit(&h) % nHash;
    collide[i/NHASH] = landmark[hv];
    landmark[hv] = i/NHASH;
  }

  /* Begin scanning the target file and generating copy commands and
  ** literal sections of the delta.
  */
  */
  nHash = lenSrc/NHASH;
  collide = fossil_malloc( nHash*2*sizeof(int) );
  landmark = &collide[nHash];
  memset(landmark, -1, nHash*sizeof(int));
  memset(collide, -1, nHash*sizeof(int));
  for(i=0; i<lenSrc-NHASH; i+=NHASH){
    int hv = hash_once(&zSrc[i]) % nHash;


    collide[i/NHASH] = landmark[hv];
    landmark[hv] = i/NHASH;
  }

  /* Begin scanning the target file and generating copy commands and
  ** literal sections of the delta.
  */
        ** sz will be the overhead (in bytes) needed to encode the copy
        ** command.  Only generate copy command if the overhead of the
        ** copy command is less than the amount of literal text to be copied.
        */
        int cnt, ofst, litsz;
        int j, k, x, y;
        int sz;


        /* Beginning at iSrc, match forwards as far as we can.  j counts
        ** the number of characters that match */
        iSrc = iBlock*NHASH;
        for(j=0, x=iSrc, y=base+i; x<lenSrc && y<lenOut; j++, x++, y++){


          if( zSrc[x]!=zOut[y] ) break;
        }
        j--;

        /* Beginning at iSrc-1, match backwards as far as we can.  k counts
        ** the number of characters that match */
        for(k=1; k<iSrc && k<=i; k++){
          if( zSrc[iSrc-k]!=zOut[base+i-k] ) break;
        }
        k--;
        ** sz will be the overhead (in bytes) needed to encode the copy
        ** command.  Only generate copy command if the overhead of the
        ** copy command is less than the amount of literal text to be copied.
        */
        int cnt, ofst, litsz;
        int j, k, x, y;
        int sz;
        int limitX;

        /* Beginning at iSrc, match forwards as far as we can.  j counts
        ** the number of characters that match */
        iSrc = iBlock*NHASH;
        y = base+i;
        limitX = ( lenSrc-iSrc <= lenOut-y ) ? lenSrc : iSrc + lenOut - y;
        for(x=iSrc; x<limitX; x++, y++){
          if( zSrc[x]!=zOut[y] ) break;
        }
        j = x - iSrc - 1;

        /* Beginning at iSrc-1, match backwards as far as we can.  k counts
        ** the number of characters that match */
        for(k=1; k<iSrc && k<=i; k++){
          if( zSrc[iSrc-k]!=zOut[base+i-k] ) break;
        }
        k--;
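
The forward-matching loop above now precomputes limitX, the single bound implied by both lenSrc and lenOut, instead of re-testing both limits on every byte. A small sketch (names invented for the example) showing the two formulations agree on the match length; the real code then keeps j as one less than that count.

#include <assert.h>

/* Old formulation: test both buffer bounds on every iteration. */
static int match_len_old(const char *zSrc, int lenSrc, int iSrc,
                         const char *zOut, int lenOut, int iOut){
  int j, x, y;
  for(j=0, x=iSrc, y=iOut; x<lenSrc && y<lenOut; j++, x++, y++){
    if( zSrc[x]!=zOut[y] ) break;
  }
  return j;
}

/* New formulation: fold both bounds into a single precomputed limit. */
static int match_len_new(const char *zSrc, int lenSrc, int iSrc,
                         const char *zOut, int lenOut, int iOut){
  int x, y = iOut;
  int limitX = ( lenSrc-iSrc <= lenOut-y ) ? lenSrc : iSrc + lenOut - y;
  for(x=iSrc; x<limitX; x++, y++){
    if( zSrc[x]!=zOut[y] ) break;
  }
  return x - iSrc;
}

int main(void){
  const char *a = "the quick brown fox";   /* 19 bytes */
  const char *b = "a quick brown fix";     /* 17 bytes */
  const char *c = "brown";                 /* 5 bytes */
  /* scan stops at a mismatch */
  assert( match_len_old(a,19,4, b,17,2)==match_len_new(a,19,4, b,17,2) );
  /* scan stops at the end of the shorter buffer */
  assert( match_len_old(a,19,10, c,5,0)==match_len_new(a,19,10, c,5,0) );
  return 0;
}
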
  (void)getInt(&zDelta, &lenDelta);
  if( *zDelta!='\n' ){
    /* ERROR: size integer not terminated by "\n" */
    return -1;
  }
  zDelta++; lenDelta--;
  while( *zDelta && lenDelta>0 ){
    unsigned int cnt, ofst;
    cnt = getInt(&zDelta, &lenDelta);
    switch( zDelta[0] ){
      case '@': {
        zDelta++; lenDelta--;
        ofst = getInt(&zDelta, &lenDelta);
        if( lenDelta>0 && zDelta[0]!=',' ){
          /* ERROR: copy command not terminated by ',' */
          return -1;
        }
        zDelta++; lenDelta--;
        nCopy += cnt;
        break;
  (void)getInt(&zDelta, &lenDelta);
  if( *zDelta!='\n' ){
    /* ERROR: size integer not terminated by "\n" */
    return -1;
  }
  zDelta++; lenDelta--;
  while( *zDelta && lenDelta>0 ){
    unsigned int cnt;
    cnt = getInt(&zDelta, &lenDelta);
    switch( zDelta[0] ){
      case '@': {
        zDelta++; lenDelta--;
        (void)getInt(&zDelta, &lenDelta);
        if( lenDelta>0 && zDelta[0]!=',' ){
          /* ERROR: copy command not terminated by ',' */
          return -1;
        }
        zDelta++; lenDelta--;
        nCopy += cnt;
        break;
Changes to src/deltacmd.c.
** Create a delta that carries FILE1 into FILE2.  Print the
** number of bytes copied and the number of bytes inserted.
*/
void delta_analyze_cmd(void){
  Blob orig, target, delta;
  int nCopy = 0;
  int nInsert = 0;
  int sz1, sz2;
  if( g.argc!=4 ){
    usage("ORIGIN TARGET");
  }
  if( blob_read_from_file(&orig, g.argv[2])<0 ){
    fossil_fatal("cannot read %s\n", g.argv[2]);
  }
  if( blob_read_from_file(&target, g.argv[3])<0 ){
    fossil_fatal("cannot read %s\n", g.argv[3]);
  }
  blob_delta_create(&orig, &target, &delta);
  delta_analyze(blob_buffer(&delta), blob_size(&delta), &nCopy, &nInsert);
  sz1 = blob_size(&orig);
  sz2 = blob_size(&target);

  blob_reset(&orig);
  blob_reset(&target);
  blob_reset(&delta);
  fossil_print("original size:  %8d\n", sz1);
  fossil_print("bytes copied:   %8d (%.1f%% of target)\n",
               nCopy, (100.0*nCopy)/sz2);
  fossil_print("bytes inserted: %8d (%.1f%% of target)\n",
               nInsert, (100.0*nInsert)/sz2);
  fossil_print("final size:     %8d\n", sz2);

}

/*
** Apply the delta in pDelta to the original file pOriginal to generate
** the target file pTarget.  The pTarget blob is initialized by this
** routine.
**

** Create a delta that carries FILE1 into FILE2.  Print the
** number of bytes copied and the number of bytes inserted.
*/
void delta_analyze_cmd(void){
  Blob orig, target, delta;
  int nCopy = 0;
  int nInsert = 0;
  int sz1, sz2, sz3;
  if( g.argc!=4 ){
    usage("ORIGIN TARGET");
  }
  if( blob_read_from_file(&orig, g.argv[2])<0 ){
    fossil_fatal("cannot read %s\n", g.argv[2]);
  }
  if( blob_read_from_file(&target, g.argv[3])<0 ){
    fossil_fatal("cannot read %s\n", g.argv[3]);
  }
  blob_delta_create(&orig, &target, &delta);
  delta_analyze(blob_buffer(&delta), blob_size(&delta), &nCopy, &nInsert);
  sz1 = blob_size(&orig);
  sz2 = blob_size(&target);
  sz3 = blob_size(&delta);
  blob_reset(&orig);
  blob_reset(&target);
  blob_reset(&delta);
  fossil_print("original size:  %8d\n", sz1);
  fossil_print("bytes copied:   %8d (%.2f%% of target)\n",
               nCopy, (100.0*nCopy)/sz2);
  fossil_print("bytes inserted: %8d (%.2f%% of target)\n",
               nInsert, (100.0*nInsert)/sz2);
  fossil_print("final size:     %8d\n", sz2);
  fossil_print("delta size:     %8d\n", sz3);
}

/*
** Apply the delta in pDelta to the original file pOriginal to generate
** the target file pTarget.  The pTarget blob is initialized by this
** routine.
**
Changes to src/descendants.c.
    "INSERT INTO ok"
    "  SELECT rid FROM ancestor;",
    rid, rid, directOnly ? "AND plink.isPrim" : "", N
  );
}

/*
** Compute up to N direct ancestors (merge ancestors do not count)
** for the check-in rid and put them in a table named "ancestor".
** Label each generation with consecutive integers going backwards
** in time such that rid has the smallest generation number and the oldest
** direct ancestor as the largest generation number.
*/
void compute_direct_ancestors(int rid, int N){
  Stmt ins;
  Stmt q;
  int gen = 0;
  db_multi_exec(
    "CREATE TEMP TABLE IF NOT EXISTS ancestor(rid INTEGER UNIQUE NOT NULL,"
                                            " generation INTEGER PRIMARY KEY);"
    "DELETE FROM ancestor;"

    "INSERT INTO ancestor VALUES(%d, 0);", rid


  );
  db_prepare(&ins, "INSERT INTO ancestor VALUES(:rid, :gen)");
  db_prepare(&q,
    "SELECT pid FROM plink"
    " WHERE cid=:rid AND isprim"
  );
  while( (N--)>0 ){
    db_bind_int(&q, ":rid", rid);
    if( db_step(&q)!=SQLITE_ROW ) break;
    rid = db_column_int(&q, 0);
    db_reset(&q);
    gen++;
    db_bind_int(&ins, ":rid", rid);
    db_bind_int(&ins, ":gen", gen);
    db_step(&ins);
    db_reset(&ins);
  }
  db_finalize(&ins);
  db_finalize(&q);
}

/*
** Compute the "mtime" of the file given whose blob.rid is "fid" that
** is part of check-in "vid".  The mtime will be the mtime on vid or
** some ancestor of vid where fid first appears.
*/

    "INSERT INTO ok"
    "  SELECT rid FROM ancestor;",
    rid, rid, directOnly ? "AND plink.isPrim" : "", N
  );
}

/*
** Compute all direct ancestors (merge ancestors do not count)
** for the check-in rid and put them in a table named "ancestor".
** Label each generation with consecutive integers going backwards
** in time such that rid has the smallest generation number and the oldest
** direct ancestor as the largest generation number.
*/
void compute_direct_ancestors(int rid){



  db_multi_exec(
    "CREATE TEMP TABLE IF NOT EXISTS ancestor(rid INTEGER UNIQUE NOT NULL,"
                                            " generation INTEGER PRIMARY KEY);"
    "DELETE FROM ancestor;"
    "WITH RECURSIVE g(x,i) AS ("
    "  VALUES(%d,1)"
    "  UNION ALL"
    "  SELECT plink.pid, g.i+1 FROM plink, g"
    "   WHERE plink.cid=g.x AND plink.isprim)"
    "INSERT INTO ancestor(rid,generation) SELECT x,i FROM g;", 

    rid

  );
}
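
The prepared-statement loop in compute_direct_ancestors() is replaced above by a single recursive common table expression. Below is a toy, self-contained illustration of how that CTE walks a plink-style graph; the table contents and check-in ids are invented, and it needs to be linked against SQLite (cc cte-demo.c -lsqlite3).

#include <stdio.h>
#include <sqlite3.h>

static int print_row(void *pArg, int nCol, char **azVal, char **azCol){
  (void)pArg; (void)nCol; (void)azCol;
  printf("rid=%s  generation=%s\n", azVal[0], azVal[1]);
  return 0;
}

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  /* Toy parent-link table: 1 <- 2 <- 3 <- 4, plus a merge parent 10 of 4. */
  sqlite3_exec(db,
    "CREATE TABLE plink(pid INT, cid INT, isprim BOOLEAN);"
    "INSERT INTO plink VALUES(1,2,1),(2,3,1),(3,4,1),(10,4,0);",
    0, 0, &zErr);
  /* Same shape as the new compute_direct_ancestors(): seed with the
  ** check-in of interest (4) and follow only primary parent links. */
  sqlite3_exec(db,
    "WITH RECURSIVE g(x,i) AS ("
    "  VALUES(4,1)"
    "  UNION ALL"
    "  SELECT plink.pid, g.i+1 FROM plink, g"
    "   WHERE plink.cid=g.x AND plink.isprim)"
    "SELECT x, i FROM g ORDER BY i;",
    print_row, 0, &zErr);
  if( zErr ){ fprintf(stderr, "%s\n", zErr); sqlite3_free(zErr); }
  sqlite3_close(db);
  return 0;
}

Running it lists rid 4, 3, 2, 1 with generations 1 through 4; the merge parent 10 is skipped by the isprim filter, matching the query in the patch.
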

/*
** Compute the "mtime" of the file given whose blob.rid is "fid" that
** is part of check-in "vid".  The mtime will be the mtime on vid or
** some ancestor of vid where fid first appears.
*/
Changes to src/diff.c.
      nSuffix++;
    }
    if( nSuffix<nShort ){
      while( nSuffix>0 && (zLeft[nLeft-nSuffix]&0xc0)==0x80 ) nSuffix--;
    }
    if( nSuffix==nLeft || nSuffix==nRight ) nPrefix = 0;
  }






  if( nPrefix+nSuffix > nShort ) nPrefix = nShort - nSuffix;
  /* A single chunk of text inserted on the right */
  if( nPrefix+nSuffix==nLeft ){
    sbsWriteLineno(p, lnLeft, SBS_LNA);
    p->iStart2 = p->iEnd2 = 0;
    p->iStart = p->iEnd = -1;
    sbsWriteText(p, pLeft, SBS_TXTA);
      nSuffix++;
    }
    if( nSuffix<nShort ){
      while( nSuffix>0 && (zLeft[nLeft-nSuffix]&0xc0)==0x80 ) nSuffix--;
    }
    if( nSuffix==nLeft || nSuffix==nRight ) nPrefix = 0;
  }

  /* If the prefix and suffix overlap, that means that we are dealing with
  ** a pure insertion or deletion of text that can have multiple alignments.
  ** Try to find an alignment that begins and ends on whitespace, or on
  ** punctuation, rather than in the middle of a name or number.
  */
  if( nPrefix+nSuffix > nShort ){
    int iBest = -1;
    int iBestVal = -1;
    int i;
    int nLong = nLeft<nRight ? nRight : nLeft;
    int nGap = nLong - nShort;
    for(i=nShort-nSuffix; i<=nPrefix; i++){
       int iVal = 0;
       char c = zLeft[i];
       if( fossil_isspace(c) ){
         iVal += 5;
       }else if( !fossil_isalnum(c) ){
         iVal += 2;
       }
       c = zLeft[i+nGap-1];
       if( fossil_isspace(c) ){
         iVal += 5;
       }else if( !fossil_isalnum(c) ){
         iVal += 2;
       }
       if( iVal>iBestVal ){
         iBestVal = iVal;
         iBest = i;
       }
    }
    nPrefix = iBest;
    nSuffix = nShort - nPrefix;
  }

  /* A single chunk of text inserted on the right */
  if( nPrefix+nSuffix==nLeft ){
    sbsWriteLineno(p, lnLeft, SBS_LNA);
    p->iStart2 = p->iEnd2 = 0;
    p->iStart = p->iEnd = -1;
    sbsWriteText(p, pLeft, SBS_TXTA);
  p->nOrig = p->c.nTo;
  return 0;
}

/*
** The input pParent is the next most recent ancestor of the file
** being annotated.  Do another step of the annotation.  Return true
** if additional annotation is required.  zPName is the tag to insert
** on each line of the file being annotated that was contributed by
** pParent.  Memory to hold zPName is leaked.
*/
static int annotation_step(Annotator *p, Blob *pParent, int iVers, u64 diffFlags){
  int i, j;
  int lnTo;

  /* Prepare the parent file to be diffed */
  p->c.aFrom = break_into_lines(blob_str(pParent), blob_size(pParent),
  p->nOrig = p->c.nTo;
  return 0;
}

/*
** The input pParent is the next most recent ancestor of the file
** being annotated.  Do another step of the annotation.  Return true
** if additional annotation is required.


*/
static int annotation_step(Annotator *p, Blob *pParent, int iVers, u64 diffFlags){
  int i, j;
  int lnTo;

  /* Prepare the parent file to be diffed */
  p->c.aFrom = break_into_lines(blob_str(pParent), blob_size(pParent),
/* Annotation flags (any DIFF flag can be used as Annotation flag as well) */
#define ANN_FILE_VERS   (((u64)0x20)<<32) /* Show file vers rather than commit vers */
#define ANN_FILE_ANCEST (((u64)0x40)<<32) /* Prefer check-ins in the ANCESTOR table */

/*
** Compute a complete annotation on a file.  The file is identified
** by its filename number (filename.fnid) and the baseline in which
** it was checked in (mlink.mid).
*/
static void annotate_file(
  Annotator *p,        /* The annotator */
  int fnid,            /* The name of the file to be annotated */
  int mid,             /* Use the version of the file in this check-in */
  int iLimit,          /* Limit the number of levels if greater than zero */
  u64 annFlags         /* Flags to alter the annotation */

/* Annotation flags (any DIFF flag can be used as Annotation flag as well) */
#define ANN_FILE_VERS   (((u64)0x20)<<32) /* Show file vers rather than commit vers */
#define ANN_FILE_ANCEST (((u64)0x40)<<32) /* Prefer check-ins in the ANCESTOR table */

/*
** Compute a complete annotation on a file.  The file is identified
** by its filename number (filename.fnid) and check-in (mlink.mid).

*/
static void annotate_file(
  Annotator *p,        /* The annotator */
  int fnid,            /* The name of the file to be annotated */
  int mid,             /* Use the version of the file in this check-in */
  int iLimit,          /* Limit the number of levels if greater than zero */
  u64 annFlags         /* Flags to alter the annotation */
  ignoreWs = P("w")!=0;
  if( ignoreWs ) annFlags |= DIFF_IGNORE_ALLWS;
  if( !db_exists("SELECT 1 FROM mlink WHERE mid=%d AND fnid=%d",mid,fnid) ){
    fossil_redirect_home();
  }

  /* compute the annotation */
  compute_direct_ancestors(mid, 10000000);
  annotate_file(&ann, fnid, mid, iLimit, annFlags);
  zCI = ann.aVers[0].zMUuid;

  /* generate the web page */
  style_header("Annotation For %h", zFilename);
  if( bBlame ){
    url_initialize(&url, "blame");

  ignoreWs = P("w")!=0;
  if( ignoreWs ) annFlags |= DIFF_IGNORE_ALLWS;
  if( !db_exists("SELECT 1 FROM mlink WHERE mid=%d AND fnid=%d",mid,fnid) ){
    fossil_redirect_home();
  }

  /* compute the annotation */
  compute_direct_ancestors(mid);
  annotate_file(&ann, fnid, mid, iLimit, annFlags);
  zCI = ann.aVers[0].zMUuid;

  /* generate the web page */
  style_header("Annotation For %h", zFilename);
  if( bBlame ){
    url_initialize(&url, "blame");
    fossil_fatal("not part of current checkout: %s", zFilename);
  }
  cid = db_lget_int("checkout", 0);
  if( cid == 0 ){
    fossil_fatal("Not in a checkout");
  }
  if( iLimit<=0 ) iLimit = 1000000000;
  compute_direct_ancestors(cid, 1000000);
  mid = db_int(0, "SELECT mlink.mid FROM mlink, ancestor "
          " WHERE mlink.fid=%d AND mlink.fnid=%d AND mlink.mid=ancestor.rid"
          " ORDER BY ancestor.generation ASC LIMIT 1",
          fid, fnid);
  if( mid==0 ){
    fossil_fatal("unable to find manifest");
  }
    fossil_fatal("not part of current checkout: %s", zFilename);
  }
  cid = db_lget_int("checkout", 0);
  if( cid == 0 ){
    fossil_fatal("Not in a checkout");
  }
  if( iLimit<=0 ) iLimit = 1000000000;
  compute_direct_ancestors(cid);
  mid = db_int(0, "SELECT mlink.mid FROM mlink, ancestor "
          " WHERE mlink.fid=%d AND mlink.fnid=%d AND mlink.mid=ancestor.rid"
          " ORDER BY ancestor.generation ASC LIMIT 1",
          fid, fnid);
  if( mid==0 ){
    fossil_fatal("unable to find manifest");
  }
Changes to src/diffcmd.c.
#  define NULL_DEVICE "/dev/null"
#endif

/*
** Used when the name for the diff is unknown.
*/
#define DIFF_NO_NAME  "(unknown)"
/*
** Print the "Index:" message that patches wants to see at the top of a diff.
*/
void diff_print_index(const char *zFile, u64 diffFlags){
  if( (diffFlags & (DIFF_SIDEBYSIDE|DIFF_BRIEF))==0 ){
    char *z = mprintf("Index: %s\n%.66c\n", zFile, '=');
#  define NULL_DEVICE "/dev/null"
#endif

/*
** Used when the name for the diff is unknown.
*/
#define DIFF_NO_NAME  "(unknown)"

/*
** Use the "exec-rel-paths" setting and the --exec-abs-paths and
** --exec-rel-paths command line options to determine whether
** certain external commands are executed using relative paths.
*/
static int determine_exec_relative_option(int force){
  static int relativePaths = -1;
  if( force || relativePaths==-1 ){
    int relPathOption = find_option("exec-rel-paths", 0, 0)!=0;
    int absPathOption = find_option("exec-abs-paths", 0, 0)!=0;
#if defined(FOSSIL_ENABLE_EXEC_REL_PATHS)
    relativePaths = db_get_boolean("exec-rel-paths", 1);
#else
    relativePaths = db_get_boolean("exec-rel-paths", 0);
#endif
    if( relPathOption ){ relativePaths = 1; }
    if( absPathOption ){ relativePaths = 0; }
  }
  return relativePaths;
}

#if INTERFACE
/*
** An array of FileDirList objects describe the files and directories listed
** on the command line of a "diff" command.  Only those objects listed are
** actually diffed.
*/
struct FileDirList {
  int nUsed;       /* Number of times each entry is used */
  int nName;       /* Length of the entry */
  char *zName;     /* Text of the entry */
};
#endif

/*
** Return true if zFile is a file named on the azInclude[] list or is
** a file in a directory named on the azInclude[] list.
**
** if azInclude is NULL, then always include zFile.
*/
static int file_dir_match(FileDirList *p, const char *zFile){
  if( p==0 || strcmp(p->zName,".")==0 ) return 1;
  if( filenames_are_case_sensitive() ){
    while( p->zName ){
      if( strcmp(zFile, p->zName)==0
       || (strncmp(zFile, p->zName, p->nName)==0
           && zFile[p->nName]=='/')
      ){
        break;
      }
      p++;
    }
  }else{
    while( p->zName ){
      if( fossil_stricmp(zFile, p->zName)==0
       || (fossil_strnicmp(zFile, p->zName, p->nName)==0
           && zFile[p->nName]=='/')
      ){
        break;
      }
      p++;
    }
  }
  if( p->zName ){
    p->nUsed++;
    return 1;
  }
  return 0;
}
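
file_dir_match() above treats each FileDirList entry as either an exact file name or a directory prefix of the path being diffed. Here is a simplified, case-sensitive sketch of just that matching rule; it leaves out the nUsed bookkeeping and the case-insensitive branch.

#include <assert.h>
#include <string.h>

/* Simplified, case-sensitive version of the rule in file_dir_match():
** an entry matches a file if it names the file exactly or names one of
** its parent directories ("." matches everything). */
static int entry_matches(const char *zEntry, const char *zFile){
  size_t n = strlen(zEntry);
  if( strcmp(zEntry, ".")==0 ) return 1;
  if( strcmp(zFile, zEntry)==0 ) return 1;
  return strncmp(zFile, zEntry, n)==0 && zFile[n]=='/';
}

int main(void){
  assert( entry_matches("src", "src/diffcmd.c") );            /* directory prefix */
  assert( entry_matches("src/diffcmd.c", "src/diffcmd.c") );  /* exact file */
  assert( !entry_matches("src", "srcs/diffcmd.c") );          /* no partial names */
  assert( entry_matches(".", "anything/at/all") );
  return 0;
}
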

/*
** Print the "Index:" message that patches wants to see at the top of a diff.
*/
void diff_print_index(const char *zFile, u64 diffFlags){
  if( (diffFlags & (DIFF_SIDEBYSIDE|DIFF_BRIEF))==0 ){
    char *z = mprintf("Index: %s\n%.66c\n", zFile, '=');
    /* Delete the temporary file and clean up memory used */
    file_delete(zTemp1);
    file_delete(zTemp2);
    blob_reset(&cmd);
  }
}

/*
** Do a diff against a single file named in zFileTreeName from version zFrom
** against the same file on disk.
**
** Use the internal diff logic if zDiffCmd is NULL.  Otherwise call the
** command zDiffCmd to do the diffing.
**
** When using an external diff program, zBinGlob contains the GLOB patterns
** for file names to treat as binary.  If fIncludeBinary is zero, these files
** will be skipped in addition to files that may contain binary content.
*/
static void diff_one_against_disk(
  const char *zFrom,        /* Name of file */
  const char *zDiffCmd,     /* Use this "diff" command */
  const char *zBinGlob,     /* Treat file names matching this as binary */
  int fIncludeBinary,       /* Include binary files for external diff */
  u64 diffFlags,            /* Diff control flags */
  const char *zFileTreeName
){
  Blob fname;
  Blob content;
  int isLink;
  int isBin;
  file_tree_name(zFileTreeName, &fname, 0, 1);
  historical_version_of_file(zFrom, blob_str(&fname), &content, &isLink, 0,
                             fIncludeBinary ? 0 : &isBin, 0);
  if( !isLink != !file_wd_islink(zFrom) ){
    fossil_print("%s",DIFF_CANNOT_COMPUTE_SYMLINK);
  }else{
    diff_file(&content, isBin, zFileTreeName, zFileTreeName,
              zDiffCmd, zBinGlob, fIncludeBinary, diffFlags);
  }
  blob_reset(&content);
  blob_reset(&fname);
}

/*
** Run a diff between the version zFrom and files on disk.  zFrom might
** be NULL which means to simply show the difference between the edited
** files on disk and the check-out on which they are based.
**
** Use the internal diff logic if zDiffCmd is NULL.  Otherwise call the
** command zDiffCmd to do the diffing.
**
** When using an external diff program, zBinGlob contains the GLOB patterns
** for file names to treat as binary.  If fIncludeBinary is zero, these files
** will be skipped in addition to files that may contain binary content.
*/
static void diff_all_against_disk(
  const char *zFrom,        /* Version to difference from */
  const char *zDiffCmd,     /* Use this diff command.  NULL for built-in */
  const char *zBinGlob,     /* Treat file names matching this as binary */
  int fIncludeBinary,       /* Treat file names matching this as binary */
  u64 diffFlags             /* Flags controlling diff output */

){
  int vid;
  Blob sql;
  Stmt q;
  int asNewFile;            /* Treat non-existent files as empty files */

  asNewFile = (diffFlags & DIFF_VERBOSE)!=0;
    /* Delete the temporary file and clean up memory used */
    file_delete(zTemp1);
    file_delete(zTemp2);
    blob_reset(&cmd);
  }
}
/*
** Run a diff between the version zFrom and files on disk.  zFrom might
** be NULL which means to simply show the difference between the edited
** files on disk and the check-out on which they are based.
**
** Use the internal diff logic if zDiffCmd is NULL.  Otherwise call the
** command zDiffCmd to do the diffing.
**
** When using an external diff program, zBinGlob contains the GLOB patterns
** for file names to treat as binary.  If fIncludeBinary is zero, these files
** will be skipped in addition to files that may contain binary content.
*/
static void diff_against_disk(
  const char *zFrom,        /* Version to difference from */
  const char *zDiffCmd,     /* Use this diff command.  NULL for built-in */
  const char *zBinGlob,     /* Treat file names matching this as binary */
  int fIncludeBinary,       /* Treat file names matching this as binary */
  u64 diffFlags,            /* Flags controlling diff output */
  FileDirList *pFileDir     /* Which files to diff */
){
  int vid;
  Blob sql;
  Stmt q;
  int asNewFile;            /* Treat non-existent files as empty files */

  asNewFile = (diffFlags & DIFF_VERBOSE)!=0;
  while( db_step(&q)==SQLITE_ROW ){
    const char *zPathname = db_column_text(&q,0);
    int isDeleted = db_column_int(&q, 1);
    int isChnged = db_column_int(&q,2);
    int isNew = db_column_int(&q,3);
    int srcid = db_column_int(&q, 4);
    int isLink = db_column_int(&q, 5);
    char *zToFree = mprintf("%s%s", g.zLocalRoot, zPathname);
    const char *zFullName = zToFree;
    int showDiff = 1;
    if( isDeleted ){
      fossil_print("DELETED  %s\n", zPathname);
      if( !asNewFile ){ showDiff = 0; zFullName = NULL_DEVICE; }
    }else if( file_access(zFullName, F_OK) ){
      fossil_print("MISSING  %s\n", zPathname);
      if( !asNewFile ){ showDiff = 0; }
    }else if( isNew ){
      fossil_print("ADDED    %s\n", zPathname);
      srcid = 0;
      if( !asNewFile ){ showDiff = 0; }
    }else if( isChnged==3 ){
      fossil_print("ADDED_BY_MERGE %s\n", zPathname);
      srcid = 0;




      if( !asNewFile ){ showDiff = 0; }
    }
    if( showDiff ){
      Blob content;
      int isBin;
      if( !isLink != !file_wd_islink(zFullName) ){
        diff_print_index(zPathname, diffFlags);

  while( db_step(&q)==SQLITE_ROW ){
    const char *zPathname = db_column_text(&q,0);
    int isDeleted = db_column_int(&q, 1);
    int isChnged = db_column_int(&q,2);
    int isNew = db_column_int(&q,3);
    int srcid = db_column_int(&q, 4);
    int isLink = db_column_int(&q, 5);

    const char *zFullName;
    int showDiff = 1;
    Blob fname;

    if( !file_dir_match(pFileDir, zPathname) ) continue;
    if( determine_exec_relative_option(0) ){
      blob_zero(&fname);
      file_relative_name(zPathname, &fname, 1);
    }else{
      blob_set(&fname, g.zLocalRoot);
      blob_append(&fname, zPathname, -1);
    }
    zFullName = blob_str(&fname);
    if( isDeleted ){
      fossil_print("DELETED  %s\n", zPathname);
      if( !asNewFile ){ showDiff = 0; zFullName = NULL_DEVICE; }
    }else if( file_access(zFullName, F_OK) ){
      fossil_print("MISSING  %s\n", zPathname);
      if( !asNewFile ){ showDiff = 0; }
    }else if( isNew ){
      fossil_print("ADDED    %s\n", zPathname);
      srcid = 0;
      if( !asNewFile ){ showDiff = 0; }
    }else if( isChnged==3 ){
      fossil_print("ADDED_BY_MERGE %s\n", zPathname);
      srcid = 0;
      if( !asNewFile ){ showDiff = 0; }
    }else if( isChnged==5 ){
      fossil_print("ADDED_BY_INTEGRATE %s\n", zPathname);
      srcid = 0;
      if( !asNewFile ){ showDiff = 0; }
    }
    if( showDiff ){
      Blob content;
      int isBin;
      if( !isLink != !file_wd_islink(zFullName) ){
        diff_print_index(zPathname, diffFlags);
      }
      isBin = fIncludeBinary ? 0 : looks_like_binary(&content);
      diff_print_index(zPathname, diffFlags);
      diff_file(&content, isBin, zFullName, zPathname, zDiffCmd,
                zBinGlob, fIncludeBinary, diffFlags);
      blob_reset(&content);
    }
    free(zToFree);
  }
  db_finalize(&q);
  db_end_transaction(1);  /* ROLLBACK */
}

/*
** Output the differences between two versions of a single file.
** zFrom and zTo are the check-ins containing the two file versions.
**
** Use the internal diff logic if zDiffCmd is NULL.  Otherwise call the
** command zDiffCmd to do the diffing.
**
** When using an external diff program, zBinGlob contains the GLOB patterns
** for file names to treat as binary.  If fIncludeBinary is zero, these files
** will be skipped in addition to files that may contain binary content.
*/
static void diff_one_two_versions(
  const char *zFrom,
  const char *zTo,
  const char *zDiffCmd,
  const char *zBinGlob,
  int fIncludeBinary,
  u64 diffFlags,
  const char *zFileTreeName

){
  char *zName;
  Blob fname;
  Blob v1, v2;
  int isLink1, isLink2;
  int isBin1, isBin2;
  if( diffFlags & DIFF_BRIEF ) return;
  file_tree_name(zFileTreeName, &fname, 0, 1);
  zName = blob_str(&fname);

  historical_version_of_file(zFrom, zName, &v1, &isLink1, 0,
                             fIncludeBinary ? 0 : &isBin1, 0);
  historical_version_of_file(zTo, zName, &v2, &isLink2, 0,
                             fIncludeBinary ? 0 : &isBin2, 0);
  if( isLink1 != isLink2 ){
    diff_print_filenames(zName, zName, diffFlags);
    fossil_print("%s",DIFF_CANNOT_COMPUTE_SYMLINK);
  }else{

    diff_file_mem(&v1, &v2, isBin1, isBin2, zName, zDiffCmd,
                  zBinGlob, fIncludeBinary, diffFlags);


  }
  blob_reset(&v1);
  blob_reset(&v2);
  blob_reset(&fname);
}

/*
** Show the difference between two files identified by ManifestFile
** entries.
**
** Use the internal diff logic if zDiffCmd is NULL.  Otherwise call the
      }
      isBin = fIncludeBinary ? 0 : looks_like_binary(&content);
      diff_print_index(zPathname, diffFlags);
      diff_file(&content, isBin, zFullName, zPathname, zDiffCmd,
                zBinGlob, fIncludeBinary, diffFlags);
      blob_reset(&content);
    }
    blob_reset(&fname);
  }
  db_finalize(&q);
  db_end_transaction(1);  /* ROLLBACK */
}

/*
** Run a diff between the undo buffer and files on disk.

**
** Use the internal diff logic if zDiffCmd is NULL.  Otherwise call the
** command zDiffCmd to do the diffing.
**
** When using an external diff program, zBinGlob contains the GLOB patterns
** for file names to treat as binary.  If fIncludeBinary is zero, these files
** will be skipped in addition to files that may contain binary content.
*/
static void diff_against_undo(


  const char *zDiffCmd,     /* Use this diff command.  NULL for built-in */
  const char *zBinGlob,     /* Treat file names matching this as binary */
  int fIncludeBinary,       /* Treat file names matching this as binary */
  u64 diffFlags,            /* Flags controlling diff output */

  FileDirList *pFileDir     /* List of files and directories to diff */
){
  Stmt q;
  Blob content;




  db_prepare(&q, "SELECT pathname, content FROM undo");
  blob_init(&content, 0, 0);
  while( db_step(&q)==SQLITE_ROW ){
    char *zFullName;
    const char *zFile = (const char*)db_column_text(&q, 0);


    if( !file_dir_match(pFileDir, zFile) ) continue;
    zFullName = mprintf("%s%s", g.zLocalRoot, zFile);


    db_column_blob(&q, 1, &content);
    diff_file(&content, 0, zFullName, zFile,
              zDiffCmd, zBinGlob, fIncludeBinary, diffFlags);
    fossil_free(zFullName);
    blob_reset(&content);
  }
  db_finalize(&q);


}

/*
** Show the difference between two files identified by ManifestFile
** entries.
**
** Use the internal diff logic if zDiffCmd is NULL.  Otherwise call the
** Use the internal diff logic if zDiffCmd is NULL.  Otherwise call the
** command zDiffCmd to do the diffing.
**
** When using an external diff program, zBinGlob contains the GLOB patterns
** for file names to treat as binary.  If fIncludeBinary is zero, these files
** will be skipped in addition to files that may contain binary content.
*/
static void diff_all_two_versions(
  const char *zFrom,
  const char *zTo,
  const char *zDiffCmd,
  const char *zBinGlob,
  int fIncludeBinary,
  u64 diffFlags
){
  Manifest *pFrom, *pTo;
  ManifestFile *pFromFile, *pToFile;
  int asNewFlag = (diffFlags & DIFF_VERBOSE)!=0 ? 1 : 0;

  pFrom = manifest_get_by_name(zFrom, 0);
  manifest_file_rewind(pFrom);

** Use the internal diff logic if zDiffCmd is NULL.  Otherwise call the
** command zDiffCmd to do the diffing.
**
** When using an external diff program, zBinGlob contains the GLOB patterns
** for file names to treat as binary.  If fIncludeBinary is zero, these files
** will be skipped in addition to files that may contain binary content.
*/
static void diff_two_versions(
  const char *zFrom,
  const char *zTo,
  const char *zDiffCmd,
  const char *zBinGlob,
  int fIncludeBinary,
  u64 diffFlags,
  FileDirList *pFileDir
){
  Manifest *pFrom, *pTo;
  ManifestFile *pFromFile, *pToFile;
  int asNewFlag = (diffFlags & DIFF_VERBOSE)!=0 ? 1 : 0;

  pFrom = manifest_get_by_name(zFrom, 0);
  manifest_file_rewind(pFrom);
      cmp = +1;
    }else if( pToFile==0 ){
      cmp = -1;
    }else{
      cmp = fossil_strcmp(pFromFile->zName, pToFile->zName);
    }
    if( cmp<0 ){

      fossil_print("DELETED %s\n", pFromFile->zName);
      if( asNewFlag ){
        diff_manifest_entry(pFromFile, 0, zDiffCmd, zBinGlob,
                            fIncludeBinary, diffFlags);

      }
      pFromFile = manifest_file_next(pFrom,0);
    }else if( cmp>0 ){

      fossil_print("ADDED   %s\n", pToFile->zName);
      if( asNewFlag ){
        diff_manifest_entry(0, pToFile, zDiffCmd, zBinGlob,
                            fIncludeBinary, diffFlags);

      }
      pToFile = manifest_file_next(pTo,0);
    }else if( fossil_strcmp(pFromFile->zUuid, pToFile->zUuid)==0 ){
      /* No changes */

      pFromFile = manifest_file_next(pFrom,0);
      pToFile = manifest_file_next(pTo,0);
    }else{

      if( diffFlags & DIFF_BRIEF ){
        fossil_print("CHANGED %s\n", pFromFile->zName);
      }else{
        diff_manifest_entry(pFromFile, pToFile, zDiffCmd, zBinGlob,
                            fIncludeBinary, diffFlags);

      }
      pFromFile = manifest_file_next(pFrom,0);
      pToFile = manifest_file_next(pTo,0);
    }
  }
  manifest_destroy(pFrom);
  manifest_destroy(pTo);
}

/*
** Return the name of the external diff command, or return NULL if
** no external diff command is defined.
*/
const char *diff_command_external(int guiDiff){
  char *zDefault;
  const char *zName;

  if( guiDiff ){
#if defined(_WIN32)
    zDefault = "WinDiff.exe";
#else
    zDefault = 0;

      cmp = +1;
    }else if( pToFile==0 ){
      cmp = -1;
    }else{
      cmp = fossil_strcmp(pFromFile->zName, pToFile->zName);
    }
    if( cmp<0 ){
      if( file_dir_match(pFileDir, pFromFile->zName) ){
        fossil_print("DELETED %s\n", pFromFile->zName);
        if( asNewFlag ){
          diff_manifest_entry(pFromFile, 0, zDiffCmd, zBinGlob,
                              fIncludeBinary, diffFlags);
        }
      }
      pFromFile = manifest_file_next(pFrom,0);
    }else if( cmp>0 ){
      if( file_dir_match(pFileDir, pToFile->zName) ){
        fossil_print("ADDED   %s\n", pToFile->zName);
        if( asNewFlag ){
          diff_manifest_entry(0, pToFile, zDiffCmd, zBinGlob,
                              fIncludeBinary, diffFlags);
        }
      }
      pToFile = manifest_file_next(pTo,0);
    }else if( fossil_strcmp(pFromFile->zUuid, pToFile->zUuid)==0 ){
      /* No changes */
      (void)file_dir_match(pFileDir, pFromFile->zName); /* Record name usage */
      pFromFile = manifest_file_next(pFrom,0);
      pToFile = manifest_file_next(pTo,0);
    }else{
      if( file_dir_match(pFileDir, pToFile->zName) ){
        if( diffFlags & DIFF_BRIEF ){
          fossil_print("CHANGED %s\n", pFromFile->zName);
        }else{
          diff_manifest_entry(pFromFile, pToFile, zDiffCmd, zBinGlob,
                              fIncludeBinary, diffFlags);
        }
      }
      pFromFile = manifest_file_next(pFrom,0);
      pToFile = manifest_file_next(pTo,0);
    }
  }
  manifest_destroy(pFrom);
  manifest_destroy(pTo);
}
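
/*
** Editorial note on the loop above: file_dir_match() is also called for
** files whose contents are unchanged.  That call exists only to bump the
** nUsed counter of the matching FileDirList entry, so that diff_cmd() can
** later report "not found" for any name the user supplied that never
** matched anything.
*/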

/*
** Return the name of the external diff command, or return NULL if
** no external diff command is defined.
*/
const char *diff_command_external(int guiDiff){
  const char *zDefault;
  const char *zName;

  if( guiDiff ){
#if defined(_WIN32)
    zDefault = "WinDiff.exe";
#else
    zDefault = 0;
**
** Options:
**   --binary PATTERN           Treat files that match the glob PATTERN as binary
**   --branch BRANCH            Show diff of all changes on BRANCH
**   --brief                    Show filenames only
**   --context|-c N             Use N lines of context
**   --diff-binary BOOL         Include binary files when using external commands


**   --from|-r VERSION          select VERSION as source for the diff
**   --internal|-i              use internal diff logic
**   --side-by-side|-y          side-by-side diff
**   --strip-trailing-cr        Strip trailing CR
**   --tk                       Launch a Tcl/Tk GUI for display
**   --to VERSION               select VERSION as target for the diff

**   --unified                  unified diff
**   -v|--verbose               output complete text of added or deleted files
**   -w|--ignore-all-space      Ignore white space when comparing lines
**   -W|--width <num>           Width of lines in side-by-side diff
**   -Z|--ignore-trailing-space Ignore changes to end-of-line whitespace
*/
void diff_cmd(void){
  int isGDiff;               /* True for gdiff.  False for normal diff */
  int isInternDiff;          /* True for internal diff */
  int verboseFlag;           /* True if -v or --verbose flag is used */
  const char *zFrom;         /* Source version number */
  const char *zTo;           /* Target version number */
  const char *zBranch;       /* Branch to diff */
  const char *zDiffCmd = 0;  /* External diff command. NULL for internal diff */
  const char *zBinGlob = 0;  /* Treat file names matching this as binary */
  int fIncludeBinary = 0;    /* Include binary files for external diff */
  u64 diffFlags = 0;         /* Flags to control the DIFF */
  int f;

  if( find_option("tk",0,0)!=0 ){
    diff_tk("diff", 2);
    return;
  }
  isGDiff = g.argv[1][0]=='g';
  isInternDiff = find_option("internal","i",0)!=0;
  zFrom = find_option("from", "r", 1);
  zTo = find_option("to", 0, 1);
  zBranch = find_option("branch", 0, 1);

  diffFlags = diff_options();
  verboseFlag = find_option("verbose","v",0)!=0;
  if( !verboseFlag ){
    verboseFlag = find_option("new-file","N",0)!=0; /* deprecated */
  }
  if( verboseFlag ) diffFlags |= DIFF_VERBOSE;
  if( zBranch ){
    if( zTo || zFrom ){
      fossil_fatal("cannot use --from or --to with --branch");
    }
    zTo = zBranch;
    zFrom = mprintf("root:%s", zBranch);
  }
  if( zTo==0 ){
    db_must_be_within_tree();
    if( !isInternDiff ){
      zDiffCmd = diff_command_external(isGDiff);
    }
    zBinGlob = diff_get_binary_glob();
    fIncludeBinary = diff_include_binary_files();
    verify_all_options();
    if( g.argc>=3 ){
      for(f=2; f<g.argc; ++f){
        diff_one_against_disk(zFrom, zDiffCmd, zBinGlob, fIncludeBinary,
                              diffFlags, g.argv[f]);
      }
    }else{
      diff_all_against_disk(zFrom, zDiffCmd, zBinGlob, fIncludeBinary,
                            diffFlags);
    }
  }else if( zFrom==0 ){
    fossil_fatal("must use --from if --to is present");
  }else{
    db_find_and_open_repository(0, 0);

    if( !isInternDiff ){
      zDiffCmd = diff_command_external(isGDiff);
    }
    zBinGlob = diff_get_binary_glob();
    fIncludeBinary = diff_include_binary_files();

    verify_all_options();
    if( g.argc>=3 ){
      for(f=2; f<g.argc; ++f){
        diff_one_two_versions(zFrom, zTo, zDiffCmd, zBinGlob, fIncludeBinary,
                              diffFlags, g.argv[f]);
      }


    }else{
      diff_all_two_versions(zFrom, zTo, zDiffCmd, zBinGlob, fIncludeBinary,
                            diffFlags);
    }
  }
}

/*
** WEBPAGE: vpatch
** URL: /vpatch?from=FROM&to=TO
**
** Show a patch that goes from check-in FROM to check-in TO.
*/
void vpatch_page(void){
  const char *zFrom = P("from");
  const char *zTo = P("to");
  login_check_credentials();
  if( !g.perm.Read ){ login_needed(g.anon.Read); return; }
  if( zFrom==0 || zTo==0 ) fossil_redirect_home();

  cgi_set_content_type("text/plain");
  diff_all_two_versions(zFrom, zTo, 0, 0, 0, DIFF_VERBOSE);
}

**
** Options:
**   --binary PATTERN           Treat files that match the glob PATTERN as binary
**   --branch BRANCH            Show diff of all changes on BRANCH
**   --brief                    Show filenames only
**   --context|-c N             Use N lines of context
**   --diff-binary BOOL         Include binary files when using external commands
**   --exec-abs-paths           Force absolute path names with external commands.
**   --exec-rel-paths           Force relative path names with external commands.
**   --from|-r VERSION          select VERSION as source for the diff
**   --internal|-i              use internal diff logic
**   --side-by-side|-y          side-by-side diff
**   --strip-trailing-cr        Strip trailing CR
**   --tk                       Launch a Tcl/Tk GUI for display
**   --to VERSION               select VERSION as target for the diff
**   --undo                     Diff against the "undo" buffer
**   --unified                  unified diff
**   -v|--verbose               output complete text of added or deleted files
**   -w|--ignore-all-space      Ignore white space when comparing lines
**   -W|--width <num>           Width of lines in side-by-side diff
**   -Z|--ignore-trailing-space Ignore changes to end-of-line whitespace
*/
void diff_cmd(void){
  int isGDiff;               /* True for gdiff.  False for normal diff */
  int isInternDiff;          /* True for internal diff */
  int verboseFlag;           /* True if -v or --verbose flag is used */
  const char *zFrom;         /* Source version number */
  const char *zTo;           /* Target version number */
  const char *zBranch;       /* Branch to diff */
  const char *zDiffCmd = 0;  /* External diff command. NULL for internal diff */
  const char *zBinGlob = 0;  /* Treat file names matching this as binary */
  int fIncludeBinary = 0;    /* Include binary files for external diff */
  int againstUndo = 0;       /* Diff against files in the undo buffer */
  u64 diffFlags = 0;         /* Flags to control the DIFF */

  FileDirList *pFileDir = 0; /* Restrict the diff to these files */

  if( find_option("tk",0,0)!=0 ){
    diff_tk("diff", 2);
    return;
  }
  isGDiff = g.argv[1][0]=='g';
  isInternDiff = find_option("internal","i",0)!=0;
  zFrom = find_option("from", "r", 1);
  zTo = find_option("to", 0, 1);
  zBranch = find_option("branch", 0, 1);
  againstUndo = find_option("undo",0,0)!=0;
  diffFlags = diff_options();
  verboseFlag = find_option("verbose","v",0)!=0;
  if( !verboseFlag ){
    verboseFlag = find_option("new-file","N",0)!=0; /* deprecated */
  }
  if( verboseFlag ) diffFlags |= DIFF_VERBOSE;
  if( againstUndo && (zFrom!=0 || zTo!=0 || zBranch!=0) ){
    fossil_fatal("cannot use --undo together with --from or --to or --branch");
  }
  if( zBranch ){
    if( zTo || zFrom ){
      fossil_fatal("cannot use --from or --to with --branch");
    }
    zTo = zBranch;
    zFrom = mprintf("root:%s", zBranch);
  }
  if( zTo==0 || againstUndo ){
    db_must_be_within_tree();
  }else if( zFrom==0 ){
    fossil_fatal("must use --from if --to is present");
  }else{
    db_find_and_open_repository(0, 0);
  }
  if( !isInternDiff ){
    zDiffCmd = diff_command_external(isGDiff);
  }
  zBinGlob = diff_get_binary_glob();
  fIncludeBinary = diff_include_binary_files();
  determine_exec_relative_option(1);
  verify_all_options();
  if( g.argc>=3 ){
    int i;
    Blob fname;
    pFileDir = fossil_malloc( sizeof(*pFileDir) * (g.argc-1) );
    memset(pFileDir, 0, sizeof(*pFileDir) * (g.argc-1));
    for(i=2; i<g.argc; i++){
      file_tree_name(g.argv[i], &fname, 0, 1);
      pFileDir[i-2].zName = fossil_strdup(blob_str(&fname));
      if( strcmp(pFileDir[i-2].zName,".")==0 ){
        pFileDir[0].zName[0] = '.';
        pFileDir[0].zName[1] = 0;
        break;
      }
      pFileDir[i-2].nName = blob_size(&fname);
      pFileDir[i-2].nUsed = 0;
      blob_reset(&fname);
    }
  }
  if( againstUndo ){
    if( db_lget_int("undo_available",0)==0 ){
      fossil_print("No undo or redo is available\n");
      return;
    }
    diff_against_undo(zDiffCmd, zBinGlob, fIncludeBinary,
                      diffFlags, pFileDir);
  }else if( zTo==0 ){
    diff_against_disk(zFrom, zDiffCmd, zBinGlob, fIncludeBinary,
                      diffFlags, pFileDir);
  }else{
    diff_two_versions(zFrom, zTo, zDiffCmd, zBinGlob, fIncludeBinary,
                      diffFlags, pFileDir);
  }
  if( pFileDir ){
    int i;
    for(i=0; pFileDir[i].zName; i++){
      if( pFileDir[i].nUsed==0
       && strcmp(pFileDir[0].zName,".")!=0
       && !file_isdir(g.argv[i+2])
      ){
        fossil_fatal("not found: '%s'", g.argv[i+2]);
      }
      fossil_free(pFileDir[i].zName);
    }
    fossil_free(pFileDir);
  }
}
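
/*
** Editorial sketch of the resulting command-line behavior (version and
** file names are invented for illustration, not taken from this check-in):
**
**     fossil diff                           # working files vs. the check-out
**     fossil diff --from trunk --to mybranch src/
**     fossil diff --branch mybranch --brief
**     fossil diff --undo www/index.wiki
**
** Trailing FILE or DIRECTORY arguments are collected into the FileDirList
** built above, so the --undo, working-tree, and two-version modes all honor
** the same filter.
*/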

/*
** WEBPAGE: vpatch
** URL: /vpatch?from=FROM&to=TO
**
** Show a patch that goes from check-in FROM to check-in TO.
*/
void vpatch_page(void){
  const char *zFrom = P("from");
  const char *zTo = P("to");
  login_check_credentials();
  if( !g.perm.Read ){ login_needed(g.anon.Read); return; }
  if( zFrom==0 || zTo==0 ) fossil_redirect_home();

  cgi_set_content_type("text/plain");
  diff_two_versions(zFrom, zTo, 0, 0, 0, DIFF_VERBOSE, 0);
}
Changes to src/doc.c.
** For any other binary type, return "unknown/unknown".
*/
const char *mimetype_from_content(Blob *pBlob){
  int i;
  int n;
  const unsigned char *x;

  static const char isBinary[256] = {
     1, 1, 1, 1,  1, 1, 1, 1,    1, 0, 0, 0,  0, 0, 1, 1,
     1, 1, 1, 1,  1, 1, 1, 1,    1, 1, 0, 0,  1, 1, 1, 1
  };

  /* A table of mimetypes based on file content prefixes
  */
  static const struct {
    const char *zPrefix;       /* The file prefix */
    int size;                  /* Length of the prefix */
    const char *zMimetype;     /* The corresponding mimetype */
  } aMime[] = {
    { "GIF87a",                  6, "image/gif"  },
    { "GIF89a",                  6, "image/gif"  },
    { "\211PNG\r\n\032\n",       8, "image/png"  },
    { "\377\332\377",            3, "image/jpeg" },
    { "\377\330\377",            3, "image/jpeg" },
  };

  x = (const unsigned char*)blob_buffer(pBlob);
  n = blob_size(pBlob);
  for(i=0; i<n; i++){
    unsigned char c = x[i];
    if( isBinary[c] ){
      break;
    }
  }
  if( i>=n ){
    return 0;   /* Plain text */
  }


  for(i=0; i<ArraySize(aMime); i++){
    if( n>=aMime[i].size && memcmp(x, aMime[i].zPrefix, aMime[i].size)==0 ){
      return aMime[i].zMimetype;
    }
  }
  return "unknown/unknown";
}

** For any other binary type, return "unknown/unknown".
*/
const char *mimetype_from_content(Blob *pBlob){
  int i;
  int n;
  const unsigned char *x;

  /* A table of mimetypes based on file content prefixes
  */
  static const struct {
    const char *zPrefix;       /* The file prefix */
    int size;                  /* Length of the prefix */
    const char *zMimetype;     /* The corresponding mimetype */
  } aMime[] = {
    { "GIF87a",                  6, "image/gif"  },
    { "GIF89a",                  6, "image/gif"  },
    { "\211PNG\r\n\032\n",       8, "image/png"  },
    { "\377\332\377",            3, "image/jpeg" },
    { "\377\330\377",            3, "image/jpeg" },
  };


  if( !looks_like_binary(pBlob) ) {
    return 0;   /* Plain text */
  }
  x = (const unsigned char*)blob_buffer(pBlob);
  n = blob_size(pBlob);
  for(i=0; i<ArraySize(aMime); i++){
    if( n>=aMime[i].size && memcmp(x, aMime[i].zPrefix, aMime[i].size)==0 ){
      return aMime[i].zMimetype;
    }
  }
  return "unknown/unknown";
}
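
/*
** Editorial sketch of typical use (an assumption, not part of this
** check-in): callers generally try the content-based check first and fall
** back to the extension table that follows when the blob looks like text:
**
**     const char *zMime = mimetype_from_content(&content);
**     if( zMime==0 ) zMime = mimetype_from_name(zFilename);
**
** Here mimetype_from_name() names the companion extension-based lookup
** that consults the aMime[] table below; treat the exact pairing as
** editorial.
*/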
  { "ccad",       4, "application/clariscad"             },
  { "cdf",        3, "application/x-netcdf"              },
  { "class",      5, "application/octet-stream"          },
  { "cod",        3, "application/vnd.rim.cod"           },
  { "com",        3, "application/x-msdos-program"       },
  { "cpio",       4, "application/x-cpio"                },
  { "cpt",        3, "application/mac-compactpro"        },

  { "csh",        3, "application/x-csh"                 },
  { "css",        3, "text/css"                          },
  { "csv",        3, "text/csv"                          },
  { "dcr",        3, "application/x-director"            },
  { "deb",        3, "application/x-debian-package"      },
  { "dir",        3, "application/x-director"            },
  { "dl",         2, "video/dl"                          },






>







91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
  { "ccad",       4, "application/clariscad"             },
  { "cdf",        3, "application/x-netcdf"              },
  { "class",      5, "application/octet-stream"          },
  { "cod",        3, "application/vnd.rim.cod"           },
  { "com",        3, "application/x-msdos-program"       },
  { "cpio",       4, "application/x-cpio"                },
  { "cpt",        3, "application/mac-compactpro"        },
  { "cs",         2, "text/plain"                        },
  { "csh",        3, "application/x-csh"                 },
  { "css",        3, "text/css"                          },
  { "csv",        3, "text/csv"                          },
  { "dcr",        3, "application/x-director"            },
  { "deb",        3, "application/x-debian-package"      },
  { "dir",        3, "application/x-director"            },
  { "dl",         2, "video/dl"                          },
  { "kar",        3, "audio/midi"                        },
  { "latex",      5, "application/x-latex"               },
  { "lha",        3, "application/octet-stream"          },
  { "lsp",        3, "application/x-lisp"                },
  { "lzh",        3, "application/octet-stream"          },
  { "m",          1, "text/plain"                        },
  { "m3u",        3, "audio/x-mpegurl"                   },
  { "man",        3, "application/x-troff-man"           },
  { "markdown",   8, "text/x-markdown"                   },
  { "md",         2, "text/x-markdown"                   },
  { "me",         2, "application/x-troff-me"            },
  { "mesh",       4, "model/mesh"                        },
  { "mid",        3, "audio/midi"                        },
  { "midi",       4, "audio/midi"                        },
  { "mif",        3, "application/x-mif"                 },
  { "mime",       4, "www/mime"                          },
  { "mkd",        3, "text/x-markdown"                   },
  { "mov",        3, "video/quicktime"                   },
  { "movie",      5, "video/x-sgi-movie"                 },
  { "mp2",        3, "audio/mpeg"                        },
  { "mp3",        3, "audio/mpeg"                        },
  { "mp4",        3, "video/mp4"                         },
  { "mpe",        3, "video/mpeg"                        },
  { "mpeg",       4, "video/mpeg"                        },
  { "mpg",        3, "video/mpeg"                        },
  { "mpga",       4, "audio/mpeg"                        },
  { "ms",         2, "application/x-troff-ms"            },
  { "msh",        3, "model/mesh"                        },

  { "nc",         2, "application/x-netcdf"              },
  { "oda",        3, "application/oda"                   },
  { "odp",        3, "application/vnd.oasis.opendocument.presentation" },
  { "ods",        3, "application/vnd.oasis.opendocument.spreadsheet" },
  { "odt",        3, "application/vnd.oasis.opendocument.text" },
  { "ogg",        3, "application/ogg"                   },
  { "ogm",        3, "application/ogg"                   },






|




















>







148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
  { "kar",        3, "audio/midi"                        },
  { "latex",      5, "application/x-latex"               },
  { "lha",        3, "application/octet-stream"          },
  { "lsp",        3, "application/x-lisp"                },
  { "lzh",        3, "application/octet-stream"          },
  { "m",          1, "text/plain"                        },
  { "m3u",        3, "audio/x-mpegurl"                   },
  { "man",        3, "text/plain"                        },
  { "markdown",   8, "text/x-markdown"                   },
  { "md",         2, "text/x-markdown"                   },
  { "me",         2, "application/x-troff-me"            },
  { "mesh",       4, "model/mesh"                        },
  { "mid",        3, "audio/midi"                        },
  { "midi",       4, "audio/midi"                        },
  { "mif",        3, "application/x-mif"                 },
  { "mime",       4, "www/mime"                          },
  { "mkd",        3, "text/x-markdown"                   },
  { "mov",        3, "video/quicktime"                   },
  { "movie",      5, "video/x-sgi-movie"                 },
  { "mp2",        3, "audio/mpeg"                        },
  { "mp3",        3, "audio/mpeg"                        },
  { "mp4",        3, "video/mp4"                         },
  { "mpe",        3, "video/mpeg"                        },
  { "mpeg",       4, "video/mpeg"                        },
  { "mpg",        3, "video/mpeg"                        },
  { "mpga",       4, "audio/mpeg"                        },
  { "ms",         2, "application/x-troff-ms"            },
  { "msh",        3, "model/mesh"                        },
  { "n",          1, "text/plain"                        },
  { "nc",         2, "application/x-netcdf"              },
  { "oda",        3, "application/oda"                   },
  { "odp",        3, "application/vnd.oasis.opendocument.presentation" },
  { "ods",        3, "application/vnd.oasis.opendocument.spreadsheet" },
  { "odt",        3, "application/vnd.oasis.opendocument.text" },
  { "ogg",        3, "application/ogg"                   },
  { "ogm",        3, "application/ogg"                   },
  { "tr",         2, "application/x-troff"               },
  { "tsi",        3, "audio/TSP-audio"                   },
  { "tsp",        3, "application/dsptype"               },
  { "tsv",        3, "text/tab-separated-values"         },
  { "txt",        3, "text/plain"                        },
  { "unv",        3, "application/i-deas"                },
  { "ustar",      5, "application/x-ustar"               },

  { "vcd",        3, "application/x-cdlink"              },
  { "vda",        3, "application/vda"                   },
  { "viv",        3, "video/vnd.vivo"                    },
  { "vivo",       4, "video/vnd.vivo"                    },
  { "vrml",       4, "model/vrml"                        },
  { "wav",        3, "audio/x-wav"                       },
  { "wax",        3, "audio/x-ms-wax"                    },






>







254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
  { "tr",         2, "application/x-troff"               },
  { "tsi",        3, "audio/TSP-audio"                   },
  { "tsp",        3, "application/dsptype"               },
  { "tsv",        3, "text/tab-separated-values"         },
  { "txt",        3, "text/plain"                        },
  { "unv",        3, "application/i-deas"                },
  { "ustar",      5, "application/x-ustar"               },
  { "vb",         2, "text/plain"                        },
  { "vcd",        3, "application/x-cdlink"              },
  { "vda",        3, "application/vda"                   },
  { "viv",        3, "video/vnd.vivo"                    },
  { "vivo",       4, "video/vnd.vivo"                    },
  { "vrml",       4, "model/vrml"                        },
  { "wav",        3, "audio/x-wav"                       },
  { "wax",        3, "audio/x-ms-wax"                    },
  rid = db_int(0, "SELECT rid FROM vcache"
                  " WHERE vid=%d AND fname=%Q", vid, zName);
  if( rid && content_get(rid, pContent)==0 ){
    rid = 0;
  }
  return rid;
}

/*
** WEBPAGE: doc
** URL: /doc?name=CHECKIN/FILE
** URL: /doc/CHECKIN/FILE
**
** CHECKIN can be either tag or SHA1 hash or timestamp identifying a

  rid = db_int(0, "SELECT rid FROM vcache"
                  " WHERE vid=%d AND fname=%Q", vid, zName);
  if( rid && content_get(rid, pContent)==0 ){
    rid = 0;
  }
  return rid;
}

/*
** Transfer content to the output.  During the transfer, when text of
** the following form is seen:
**
**       href="$ROOT/
**       action="$ROOT/
**
** Convert $ROOT to the root URI of the repository.  Allow ' in place of "
** and any case for href.
*/
static void convert_href_and_output(Blob *pIn){
  int i, base;
  int n = blob_size(pIn);
  char *z = blob_buffer(pIn);
  for(base=0, i=7; i<n; i++){
    if( z[i]=='$' 
     && strncmp(&z[i],"$ROOT/", 6)==0
     && (z[i-1]=='\'' || z[i-1]=='"')
     && i-base>=9
     && (fossil_strnicmp(&z[i-7]," href=", 6)==0 ||
           fossil_strnicmp(&z[i-9]," action=", 8)==0)
    ){
      blob_append(cgi_output_blob(), &z[base], i-base);
      blob_appendf(cgi_output_blob(), "%R");
      base = i+5;
    }
  }
  blob_append(cgi_output_blob(), &z[base], i-base);
}
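
/*
** Editorial illustration (repository root chosen arbitrarily): if the
** repository is served under "/myrepo", an embedded document fragment such
** as
**
**     <a href="$ROOT/timeline">Recent changes</a>
**
** is emitted by the routine above as
**
**     <a href="/myrepo/timeline">Recent changes</a>
**
** because the "%R" format expands to the root URI of the repository.
*/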

/*
** WEBPAGE: doc
** URL: /doc?name=CHECKIN/FILE
** URL: /doc/CHECKIN/FILE
**
** CHECKIN can be either tag or SHA1 hash or timestamp identifying a
**
** The "ckout" CHECKIN is intended for development - to provide a mechanism
** for looking at what a file will look like using the /doc webpage after
** it gets checked in.
**
** The file extension is used to decide how to render the file.
**
** If FILE ends in "/" then names "FILE/index.html", "FILE/index.wiki",
** and "FILE/index.md" are tried in that order.  If none of those are found,


** then FILE is completely replaced by "404.md" and tried.  If that is
** not found, then a default 404 screen is generated.














*/
void doc_page(void){
  const char *zName;                /* Argument to the /doc page */
  const char *zOrigName = "?";      /* Original document name */
  const char *zMime;                /* Document MIME type */
  char *zCheckin = "tip";           /* The check-in holding the document */
  int vid = 0;                      /* Artifact of check-in */
  int rid = 0;                      /* Artifact of file */
  int i;                            /* Loop counter */
  Blob filebody;                    /* Content of the documentation file */
  Blob title;                       /* Document title */
  int nMiss = (-1);                 /* Failed attempts to find the document */
  static const char *const azSuffix[] = {
     "index.html", "index.wiki", "index.md"



  };

  login_check_credentials();
  if( !g.perm.Read ){ login_needed(g.anon.Read); return; }
  blob_init(&title, 0, 0);
  db_begin_transaction();
  while( rid==0 && (++nMiss)<=ArraySize(azSuffix) ){

**
** The "ckout" CHECKIN is intended for development - to provide a mechanism
** for looking at what a file will look like using the /doc webpage after
** it gets checked in.
**
** The file extension is used to decide how to render the file.
**
** If FILE ends in "/" then the names "FILE/index.html", "FILE/index.wiki",
** and "FILE/index.md" are tried in that order.  If the binary was compiled
** with TH1 embedded documentation support and the "th1-docs" setting is
** enabled, the name "FILE/index.th1" is also tried.  If none of those are
** found, then FILE is completely replaced by "404.md" and tried.  If that
** is not found, then a default 404 screen is generated.
**
** Headers and footers are added for text/x-fossil-wiki and text/md
** If the document has mimetype text/html then headers and footers are
** usually not added.  However, a text/html document begins with the
** following div:
**
**       <div class='fossil-doc' data-title='TEXT'>
**
** then headers and footers are supplied.  The optional data-title field
** specifies the title of the document in that case.
**
** For fossil-doc documents and for markdown documents, text of the
** form:  "href='$ROOT/" or "action='$ROOT" has the $ROOT name expanded
** to the top-level of the repository.
*/
void doc_page(void){
  const char *zName;                /* Argument to the /doc page */
  const char *zOrigName = "?";      /* Original document name */
  const char *zMime;                /* Document MIME type */
  char *zCheckin = "tip";           /* The check-in holding the document */
  int vid = 0;                      /* Artifact of check-in */
  int rid = 0;                      /* Artifact of file */
  int i;                            /* Loop counter */
  Blob filebody;                    /* Content of the documentation file */
  Blob title;                       /* Document title */
  int nMiss = (-1);                 /* Failed attempts to find the document */
  static const char *const azSuffix[] = {
     "index.html", "index.wiki", "index.md"
#ifdef FOSSIL_ENABLE_TH1_DOCS
      , "index.th1"
#endif
  };

  login_check_credentials();
  if( !g.perm.Read ){ login_needed(g.anon.Read); return; }
  blob_init(&title, 0, 0);
  db_begin_transaction();
  while( rid==0 && (++nMiss)<=ArraySize(azSuffix) ){
    markdown_to_html(&filebody, &title, &tail);
    if( blob_size(&title)>0 ){
      style_header("%s", blob_str(&title));
    }else{
      style_header("%s", nMiss>=ArraySize(azSuffix)?
                        "Not Found" : "Documentation");
    }
    blob_append(cgi_output_blob(), blob_buffer(&tail), blob_size(&tail));
    style_footer();
  }else if( fossil_strcmp(zMime, "text/plain")==0 ){
    style_header("Documentation");
    @ <blockquote><pre>
    @ %h(blob_str(&filebody))
    @ </pre></blockquote>
    style_footer();
  }else if( fossil_strcmp(zMime, "text/html")==0
            && doc_is_embedded_html(&filebody, &title) ){
    if( blob_size(&title)==0 ) blob_append(&title,zName,-1);
    style_header("%s", blob_str(&title));
    blob_append(cgi_output_blob(), blob_buffer(&filebody),blob_size(&filebody));
    style_footer();
#ifdef FOSSIL_ENABLE_TH1_DOCS
  }else if( Th_AreDocsEnabled() &&
            fossil_strcmp(zMime, "application/x-th1")==0 ){
    style_header("%h", zName);
    Th_Render(blob_str(&filebody));
    style_footer();

    markdown_to_html(&filebody, &title, &tail);
    if( blob_size(&title)>0 ){
      style_header("%s", blob_str(&title));
    }else{
      style_header("%s", nMiss>=ArraySize(azSuffix)?
                        "Not Found" : "Documentation");
    }
    convert_href_and_output(&tail);
    style_footer();
  }else if( fossil_strcmp(zMime, "text/plain")==0 ){
    style_header("Documentation");
    @ <blockquote><pre>
    @ %h(blob_str(&filebody))
    @ </pre></blockquote>
    style_footer();
  }else if( fossil_strcmp(zMime, "text/html")==0
            && doc_is_embedded_html(&filebody, &title) ){
    if( blob_size(&title)==0 ) blob_append(&title,zName,-1);
    style_header("%s", blob_str(&title));
    convert_href_and_output(&filebody);
    style_footer();
#ifdef FOSSIL_ENABLE_TH1_DOCS
  }else if( Th_AreDocsEnabled() &&
            fossil_strcmp(zMime, "application/x-th1")==0 ){
    style_header("%h", zName);
    Th_Render(blob_str(&filebody));
    style_footer();
Changes to src/event.c.
**  v=BOOLEAN        // Show details if TRUE.  Default is FALSE.  Optional.
**
** Display an existing event identified by EVENTID
*/
void event_page(void){
  int rid = 0;             /* rid of the event artifact */
  char *zUuid;             /* UUID corresponding to rid */
  const char *zId;    /* Event identifier */
  const char *zVerbose;    /* Value of verbose option */
  char *zETime;            /* Time of the tech-note */
  char *zATime;            /* Time the artifact was created */
  int specRid;             /* rid specified by aid= parameter */
  int prevRid, nextRid;    /* Previous or next edits of this tech-note */
  Manifest *pTNote;        /* Parsed technote artifact */
  Blob fullbody;           /* Complete content of the technote body */
  Blob title;              /* Title extracted from the technote body */
  Blob tail;               /* Event body that comes after the title */
  Stmt q1;                 /* Query to search for the technote */
  int verboseFlag;         /* True to show details */
  const char *zMimetype = 0;  /* Mimetype of the document */



  /* wiki-read privilege is needed in order to read tech-notes.
  */
  login_check_credentials();
  if( !g.perm.RdWiki ){
    login_needed(g.anon.RdWiki);






|












>







60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
**  v=BOOLEAN        // Show details if TRUE.  Default is FALSE.  Optional.
**
** Display an existing event identified by EVENTID
*/
void event_page(void){
  int rid = 0;             /* rid of the event artifact */
  char *zUuid;             /* UUID corresponding to rid */
  const char *zId;         /* Event identifier */
  const char *zVerbose;    /* Value of verbose option */
  char *zETime;            /* Time of the tech-note */
  char *zATime;            /* Time the artifact was created */
  int specRid;             /* rid specified by aid= parameter */
  int prevRid, nextRid;    /* Previous or next edits of this tech-note */
  Manifest *pTNote;        /* Parsed technote artifact */
  Blob fullbody;           /* Complete content of the technote body */
  Blob title;              /* Title extracted from the technote body */
  Blob tail;               /* Event body that comes after the title */
  Stmt q1;                 /* Query to search for the technote */
  int verboseFlag;         /* True to show details */
  const char *zMimetype = 0;  /* Mimetype of the document */
  const char *zFullId;     /* Full event identifier */


  /* wiki-read privilege is needed in order to read tech-notes.
  */
  login_check_credentials();
  if( !g.perm.RdWiki ){
    login_needed(g.anon.RdWiki);
  }else{
    blob_appendf(&title, "Tech-note %S", zId);
    tail = fullbody;
  }
  style_header("%s", blob_str(&title));
  if( g.perm.WrWiki && g.perm.Write && nextRid==0 ){
    style_submenu_element("Edit", 0, "%R/technoteedit?name=%!S", zId);





  }
  zETime = db_text(0, "SELECT datetime(%.17g)", pTNote->rEventDate);
  style_submenu_element("Context", 0, "%R/timeline?c=%.20s", zId);
  if( g.perm.Hyperlink ){
    if( verboseFlag ){
      style_submenu_element("Plain", 0,
                            "%R/technote?name=%!S&aid=%s&mimetype=text/plain",






>
>
>
>
>







149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
  }else{
    blob_appendf(&title, "Tech-note %S", zId);
    tail = fullbody;
  }
  style_header("%s", blob_str(&title));
  if( g.perm.WrWiki && g.perm.Write && nextRid==0 ){
    style_submenu_element("Edit", 0, "%R/technoteedit?name=%!S", zId);
    if( g.perm.Attach ){
      style_submenu_element("Attach", "Add an attachment",
           "%R/attachadd?technote=%!S&from=%R/technote/%!S", 
           zId, zId);
    }
  }
  zETime = db_text(0, "SELECT datetime(%.17g)", pTNote->rEventDate);
  style_submenu_element("Context", 0, "%R/timeline?c=%.20s", zId);
  if( g.perm.Hyperlink ){
    if( verboseFlag ){
      style_submenu_element("Plain", 0,
                            "%R/technote?name=%!S&aid=%s&mimetype=text/plain",
  }else if( fossil_strcmp(zMimetype, "text/x-markdown")==0 ){
    cgi_append_content(blob_buffer(&tail), blob_size(&tail));
  }else{
    @ <pre>
    @ %h(blob_str(&fullbody))
    @ </pre>
  }
  style_footer();
  manifest_destroy(pTNote);
}

  }else if( fossil_strcmp(zMimetype, "text/x-markdown")==0 ){
    cgi_append_content(blob_buffer(&tail), blob_size(&tail));
  }else{
    @ <pre>
    @ %h(blob_str(&fullbody))
    @ </pre>
  }
  zFullId = db_text(0, "SELECT SUBSTR(tagname,7)"
                       "  FROM tag"
                       " WHERE tagname GLOB 'event-%q*'",
                    zId);
  attachment_list(zFullId, "<hr /><h2>Attachments:</h2><ul>");
  style_footer();
  manifest_destroy(pTNote);
}

/*
** Add or update a new tech note to the repository.  rid is id of
** the prior version of this technote, if any. 
**
** returns 1 if the tech note was added or updated, 0 if the
** update failed making an invalid artifact
*/
int event_commit_common(
  int rid,                 /* id of the prior version of the technote */
  const char *zId,         /* hash label for the technote */
  const char *zBody,       /* content of the technote */
  char *zETime,            /* timestamp for the technote */
  const char *zMimetype,   /* mimetype for the technote N-card */
  const char *zComment,    /* comment shown on the timeline */
  const char *zTags,       /* tags associated with this technote */
  const char *zClr         /* Background color */
){
  Blob event;
  char *zDate;
  Blob cksum;
  int nrid, n;

  blob_init(&event, 0, 0);
  db_begin_transaction();
  while( fossil_isspace(zComment[0]) ) zComment++;
  n = strlen(zComment);
  while( n>0 && fossil_isspace(zComment[n-1]) ){ n--; }
  if( n>0 ){
    blob_appendf(&event, "C %#F\n", n, zComment);
  }
  zDate = date_in_standard_format("now");
  blob_appendf(&event, "D %s\n", zDate);
  free(zDate);
  
  zETime[10] = 'T';
  blob_appendf(&event, "E %s %s\n", zETime, zId);
  zETime[10] = ' ';
  if( rid ){
    char *zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid);
    blob_appendf(&event, "P %s\n", zUuid);
    free(zUuid);
  }
  if( zMimetype && zMimetype[0] ){
    blob_appendf(&event, "N %s\n", zMimetype);
  }
  if( zClr && zClr[0] ){
    blob_appendf(&event, "T +bgcolor * %F\n", zClr);
  }
  if( zTags && zTags[0] ){
    Blob tags, one;
    int i, j;
    Stmt q;
    char *zBlob;

    /* Load the tags string into a blob */
    blob_zero(&tags);
    blob_append(&tags, zTags, -1);

    /* Collapse all sequences of whitespace and "," characters into
    ** a single space character */
    zBlob = blob_str(&tags);
    for(i=j=0; zBlob[i]; i++, j++){
      if( fossil_isspace(zBlob[i]) || zBlob[i]==',' ){
        while( fossil_isspace(zBlob[i+1]) ){ i++; }
        zBlob[j] = ' ';
      }else{
        zBlob[j] = zBlob[i];
      }
    }
    blob_resize(&tags, j);

    /* Parse out each tag and load it into a temporary table for sorting */
    db_multi_exec("CREATE TEMP TABLE newtags(x);");
    while( blob_token(&tags, &one) ){
      db_multi_exec("INSERT INTO newtags VALUES(%B)", &one);
    }
    blob_reset(&tags);

    /* Extract the tags in sorted order and make an entry in the
    ** artifact for each. */
    db_prepare(&q, "SELECT x FROM newtags ORDER BY x");
    while( db_step(&q)==SQLITE_ROW ){
      blob_appendf(&event, "T +sym-%F *\n", db_column_text(&q, 0));
    }
    db_finalize(&q);
  }
  if( !login_is_nobody() ){
    blob_appendf(&event, "U %F\n", login_name());
  }
  blob_appendf(&event, "W %d\n%s\n", strlen(zBody), zBody);
  md5sum_blob(&event, &cksum);
  blob_appendf(&event, "Z %b\n", &cksum);
  blob_reset(&cksum);
  nrid = content_put(&event);
  db_multi_exec("INSERT OR IGNORE INTO unsent VALUES(%d)", nrid);
  if( manifest_crosslink(nrid, &event, MC_NONE)==0 ){
    db_end_transaction(1);
    return 0;
  }
  assert( blob_is_reset(&event) );
  content_deltify(rid, nrid, 0);
  db_end_transaction(0);
  return 1;
}
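
/*
** Editorial sketch of the control artifact assembled above.  Every field
** value is invented for illustration; the blob_appendf() calls above are
** the authoritative statement of which cards appear and in what order:
**
**     C An\sexample\scomment
**     D 2016-03-06T06:26:31.961
**     E 2016-03-06T12:00:00 0123456789abcdef0123456789abcdef01234567
**     N text/x-markdown
**     T +sym-release *
**     U alice
**     W 13
**     Hello, world.
**     Z <md5 checksum of the preceding cards>
*/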

/*
** WEBPAGE: technoteedit
** WEBPAGE: eventedit
**
** Revise or create a technical note (formerly called an 'event').
**
        "   AND tag.tagname GLOB 'sym-*'",
        rid
      );
    }
  }
  zETime = db_text(0, "SELECT coalesce(datetime(%Q),datetime('now'))", zETime);
  if( P("submit")!=0 && (zBody!=0 && zComment!=0) ){
    char *zDate;
    Blob cksum;
    int nrid, n;
    blob_init(&event, 0, 0);
    db_begin_transaction();
    login_verify_csrf_secret();
    while( fossil_isspace(zComment[0]) ) zComment++;
    n = strlen(zComment);
    while( n>0 && fossil_isspace(zComment[n-1]) ){ n--; }
    if( n>0 ){
      blob_appendf(&event, "C %#F\n", n, zComment);
    }
    zDate = date_in_standard_format("now");
    blob_appendf(&event, "D %s\n", zDate);
    free(zDate);
    zETime[10] = 'T';
    blob_appendf(&event, "E %s %s\n", zETime, zId);
    zETime[10] = ' ';
    if( rid ){
      char *zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid);
      blob_appendf(&event, "P %s\n", zUuid);
      free(zUuid);
    }
    if( zMimetype && zMimetype[0] ){
      blob_appendf(&event, "N %s\n", zMimetype);
    }
    if( zClr && zClr[0] ){
      blob_appendf(&event, "T +bgcolor * %F\n", zClr);
    }
    if( zTags && zTags[0] ){
      Blob tags, one;
      int i, j;
      Stmt q;
      char *zBlob;

      /* Load the tags string into a blob */
      blob_zero(&tags);
      blob_append(&tags, zTags, -1);

      /* Collapse all sequences of whitespace and "," characters into
      ** a single space character */
      zBlob = blob_str(&tags);
      for(i=j=0; zBlob[i]; i++, j++){
        if( fossil_isspace(zBlob[i]) || zBlob[i]==',' ){
          while( fossil_isspace(zBlob[i+1]) ){ i++; }
          zBlob[j] = ' ';
        }else{
          zBlob[j] = zBlob[i];
        }
      }
      blob_resize(&tags, j);

      /* Parse out each tag and load it into a temporary table for sorting */
      db_multi_exec("CREATE TEMP TABLE newtags(x);");
      while( blob_token(&tags, &one) ){
        db_multi_exec("INSERT INTO newtags VALUES(%B)", &one);
      }
      blob_reset(&tags);

      /* Extract the tags in sorted order and make an entry in the
      ** artifact for each. */
      db_prepare(&q, "SELECT x FROM newtags ORDER BY x");
      while( db_step(&q)==SQLITE_ROW ){
        blob_appendf(&event, "T +sym-%F *\n", db_column_text(&q, 0));
      }
      db_finalize(&q);
    }
    if( !login_is_nobody() ){
      blob_appendf(&event, "U %F\n", login_name());
    }
    blob_appendf(&event, "W %d\n%s\n", strlen(zBody), zBody);
    md5sum_blob(&event, &cksum);
    blob_appendf(&event, "Z %b\n", &cksum);
    blob_reset(&cksum);
    nrid = content_put(&event);
    db_multi_exec("INSERT OR IGNORE INTO unsent VALUES(%d)", nrid);
    if( manifest_crosslink(nrid, &event, MC_NONE)==0 ){
      db_end_transaction(1);
      style_header("Error");
      @ Internal error:  Fossil tried to make an invalid artifact for
      @ the edited technode.
      style_footer();
      return;
    }
    assert( blob_is_reset(&event) );
    content_deltify(rid, nrid, 0);
    db_end_transaction(0);
    cgi_redirectf("technote?name=%T", zId);
  }
  if( P("cancel")!=0 ){
    cgi_redirectf("technote?name=%T", zId);
    return;
  }
  if( zBody==0 ){

        "   AND tag.tagname GLOB 'sym-*'",
        rid
      );
    }
  }
  zETime = db_text(0, "SELECT coalesce(datetime(%Q),datetime('now'))", zETime);
  if( P("submit")!=0 && (zBody!=0 && zComment!=0) ){





    login_verify_csrf_secret();




























    if ( !event_commit_common(rid, zId, zBody, zETime,



                              zMimetype, zComment, zTags, zClr) ){







































      style_header("Error");
      @ Internal error:  Fossil tried to make an invalid artifact for
      @ the edited technote.
      style_footer();
      return;
    }



    cgi_redirectf("technote?name=%T", zId);
  }
  if( P("cancel")!=0 ){
    cgi_redirectf("technote?name=%T", zId);
    return;
  }
  if( zBody==0 ){
  @ <input type="submit" name="preview" value="Preview Your Changes" />
  @ <input type="submit" name="submit" value="Apply These Changes" />
  @ <input type="submit" name="cancel" value="Cancel" />
  @ </td></tr></table>
  @ </div></form>
  style_footer();
}

  @ <input type="submit" name="preview" value="Preview Your Changes" />
  @ <input type="submit" name="submit" value="Apply These Changes" />
  @ <input type="submit" name="cancel" value="Cancel" />
  @ </td></tr></table>
  @ </div></form>
  style_footer();
}

/*
** Add a new tech note to the repository.  The timestamp is
** given by the zETime parameter.  isNew must be true to create
** a new page.  If no previous page with the name zPageName exists
** and isNew is false, then this routine throws an error.
*/
void event_cmd_commit(
  char *zETime,             /* timestamp */
  int isNew,                /* true to create a new page */
  Blob *pContent,           /* content of the new page */
  const char *zMimeType,    /* mimetype of the content */
  const char *zComment,     /* comment to go on the timeline */
  const char *zTags,        /* tags */
  const char *zClr          /* background color */
){
  int rid;                /* Artifact id of the tech note */
  const char *zId;        /* id of the tech note */
  rid = db_int(0, "SELECT objid FROM event"
        " WHERE datetime(mtime)=datetime('%q') AND type = 'e'"
        " LIMIT 1",
        zETime
  );
  if( rid==0 && !isNew ){
#ifdef FOSSIL_ENABLE_JSON
    g.json.resultCode = FSL_JSON_E_RESOURCE_NOT_FOUND;
#endif
    fossil_fatal("no such tech note: %s", zETime);
  }
  if( rid!=0 && isNew ){
#ifdef FOSSIL_ENABLE_JSON
    g.json.resultCode = FSL_JSON_E_RESOURCE_ALREADY_EXISTS;
#endif
    fossil_fatal("tech note %s already exists", zETime);
  }

  if ( isNew ){
    zId = db_text(0, "SELECT lower(hex(randomblob(20)))");
  }else{
    zId = db_text(0,
      "SELECT substr(tagname,7) FROM tag"
      " WHERE tagid=(SELECT tagid FROM event WHERE objid='%d')",
      rid
    );
  }

  user_select();
  if (event_commit_common(rid, zId, blob_str(pContent), zETime,
      zMimeType, zComment, zTags, zClr)==0 ){
#ifdef FOSSIL_ENABLE_JSON
    g.json.resultCode = FSL_JSON_E_ASSERT;
#endif
    fossil_fatal("Internal error: Fossil tried to make an "
                 "invalid artifact for the technote.");
  }
}
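
/*
** Editorial sketch of a caller (hypothetical values): a command handler
** that has already collected the note body in a Blob might invoke
**
**     char zWhen[] = "2016-03-06 12:00:00";
**     event_cmd_commit(zWhen, 1, &body, "text/x-fossil-wiki",
**                      "Add a technote", 0, 0);
**
** passing isNew=1 to create a fresh tech-note, or isNew=0 to amend the
** note whose event-table entry carries the same timestamp.  The buffer is
** writable because the routine temporarily patches the 'T' separator into
** the timestamp.
*/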
Changes to src/file.c.
/*
** Fill stat buf with information received from stat() or lstat().
** lstat() is called on Unix if isWd is TRUE and allow-symlinks setting is on.
**
*/
static int fossil_stat(const char *zFilename, struct fossilStat *buf, int isWd){
  int rc;
  void *zMbcs = fossil_utf8_to_filename(zFilename);
#if !defined(_WIN32)
  if( isWd && g.allowSymlinks ){
    rc = lstat(zMbcs, buf);
  }else{
    rc = stat(zMbcs, buf);
  }
#else
  rc = win32_stat(zMbcs, buf, isWd);
#endif
  fossil_filename_free(zMbcs);
  return rc;
}

/*
** Fill in the fileStat variable for the file named zFilename.
** If zFilename==0, then use the previous value of fileStat if
** there is a previous value.

/*
** Fill stat buf with information received from stat() or lstat().
** lstat() is called on Unix if isWd is TRUE and allow-symlinks setting is on.
**
*/
static int fossil_stat(const char *zFilename, struct fossilStat *buf, int isWd){
  int rc;
  void *zMbcs = fossil_utf8_to_path(zFilename, 0);
#if !defined(_WIN32)
  if( isWd && g.allowSymlinks ){
    rc = lstat(zMbcs, buf);
  }else{
    rc = stat(zMbcs, buf);
  }
#else
  rc = win32_stat(zMbcs, buf, isWd);
#endif
  fossil_path_free(zMbcs);
  return rc;
}

/*
** Fill in the fileStat variable for the file named zFilename.
** If zFilename==0, then use the previous value of fileStat if
** there is a previous value.
/*
** Wrapper around the access() system call.
*/
int file_access(const char *zFilename, int flags){
  int rc;
  void *zMbcs = fossil_utf8_to_filename(zFilename);
#ifdef _WIN32
  rc = win32_access(zMbcs, flags);
#else
  rc = access(zMbcs, flags);
#endif
  fossil_filename_free(zMbcs);
  return rc;
}

/*
** Wrapper around the chdir() system call.
** If bChroot=1, do a chroot to this dir as well
** (UNIX only)
*/
int file_chdir(const char *zChDir, int bChroot){
  int rc;
  void *zPath = fossil_utf8_to_filename(zChDir);
#ifdef _WIN32
  rc = win32_chdir(zPath, bChroot);
#else
  rc = chdir(zPath);
  if( !rc && bChroot ){
    rc = chroot(zPath);
    if( !rc ) rc = chdir("/");
  }
#endif
  fossil_filename_free(zPath);
  return rc;
}

/*
** Find an unused filename similar to zBase with zSuffix appended.
**
** Make the name relative to the working directory if relFlag is true.

/*
** Wrapper around the access() system call.
*/
int file_access(const char *zFilename, int flags){
  int rc;
  void *zMbcs = fossil_utf8_to_path(zFilename, 0);
#ifdef _WIN32
  rc = win32_access(zMbcs, flags);
#else
  rc = access(zMbcs, flags);
#endif
  fossil_path_free(zMbcs);
  return rc;
}

/*
** Wrapper around the chdir() system call.
** If bChroot=1, do a chroot to this dir as well
** (UNIX only)
*/
int file_chdir(const char *zChDir, int bChroot){
  int rc;
  void *zPath = fossil_utf8_to_path(zChDir, 1);
#ifdef _WIN32
  rc = win32_chdir(zPath, bChroot);
#else
  rc = chdir(zPath);
  if( !rc && bChroot ){
    rc = chroot(zPath);
    if( !rc ) rc = chdir("/");
  }
#endif
  fossil_path_free(zPath);
  return rc;
}
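
/*
** Editorial note (an inference from this diff, not stated in the code):
** the new second argument of fossil_utf8_to_path() appears to mark
** directory names.  The wrappers that operate on directories, such as
** file_chdir(), file_mkdir() and file_rmdir(), pass 1, while the plain
** file operations (stat, access, delete, set-mtime) pass 0.
*/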

/*
** Find an unused filename similar to zBase with zSuffix appended.
**
** Make the name relative to the working directory if relFlag is true.
void file_set_mtime(const char *zFilename, i64 newMTime){
#if !defined(_WIN32)
  char *zMbcs;
  struct timeval tv[2];
  memset(tv, 0, sizeof(tv[0])*2);
  tv[0].tv_sec = newMTime;
  tv[1].tv_sec = newMTime;
  zMbcs = fossil_utf8_to_filename(zFilename);
  utimes(zMbcs, tv);
#else
  struct _utimbuf tb;
  wchar_t *zMbcs = fossil_utf8_to_filename(zFilename);
  tb.actime = newMTime;
  tb.modtime = newMTime;
  _wutime(zMbcs, &tb);
#endif
  fossil_filename_free(zMbcs);
}

/*
** COMMAND: test-set-mtime
**
** Usage: %fossil test-set-mtime FILENAME DATE/TIME
**

void file_set_mtime(const char *zFilename, i64 newMTime){
#if !defined(_WIN32)
  char *zMbcs;
  struct timeval tv[2];
  memset(tv, 0, sizeof(tv[0])*2);
  tv[0].tv_sec = newMTime;
  tv[1].tv_sec = newMTime;
  zMbcs = fossil_utf8_to_path(zFilename, 0);
  utimes(zMbcs, tv);
#else
  struct _utimbuf tb;
  wchar_t *zMbcs = fossil_utf8_to_path(zFilename, 0);
  tb.actime = newMTime;
  tb.modtime = newMTime;
  _wutime(zMbcs, &tb);
#endif
  fossil_path_free(zMbcs);
}

/*
** COMMAND: test-set-mtime
**
** Usage: %fossil test-set-mtime FILENAME DATE/TIME
**
(old version, lines 531-576)
** Delete a file.
**
** Returns zero upon success.
*/
int file_delete(const char *zFilename){
  int rc;
#ifdef _WIN32
  wchar_t *z = fossil_utf8_to_filename(zFilename);
  rc = _wunlink(z);
#else
  char *z = fossil_utf8_to_filename(zFilename);
  rc = unlink(zFilename);
#endif
  fossil_filename_free(z);
  return rc;
}

/*
** Create the directory named in the argument, if it does not already
** exist.  If forceFlag is 1, delete any prior non-directory object
** with the same name.
**
** Return the number of errors.
*/
int file_mkdir(const char *zName, int forceFlag){
  int rc = file_wd_isdir(zName);
  if( rc==2 ){
    if( !forceFlag ) return 1;
    file_delete(zName);
  }
  if( rc!=1 ){
#if defined(_WIN32)
    wchar_t *zMbcs = fossil_utf8_to_filename(zName);
    rc = _wmkdir(zMbcs);
#else
    char *zMbcs = fossil_utf8_to_filename(zName);
    rc = mkdir(zName, 0755);
#endif
    fossil_filename_free(zMbcs);
    return rc;
  }
  return 0;
}

/*
** Create the tree of directories in which zFilename belongs, if that sequence






(new version, lines 531-576)
** Delete a file.
**
** Returns zero upon success.
*/
int file_delete(const char *zFilename){
  int rc;
#ifdef _WIN32
  wchar_t *z = fossil_utf8_to_path(zFilename, 0);
  rc = _wunlink(z);
#else
  char *z = fossil_utf8_to_path(zFilename, 0);
  rc = unlink(zFilename);
#endif
  fossil_path_free(z);
  return rc;
}

/*
** Create the directory named in the argument, if it does not already
** exist.  If forceFlag is 1, delete any prior non-directory object
** with the same name.
**
** Return the number of errors.
*/
int file_mkdir(const char *zName, int forceFlag){
  int rc = file_wd_isdir(zName);
  if( rc==2 ){
    if( !forceFlag ) return 1;
    file_delete(zName);
  }
  if( rc!=1 ){
#if defined(_WIN32)
    wchar_t *zMbcs = fossil_utf8_to_path(zName, 1);
    rc = _wmkdir(zMbcs);
#else
    char *zMbcs = fossil_utf8_to_path(zName, 1);
    rc = mkdir(zName, 0755);
#endif
    fossil_path_free(zMbcs);
    return rc;
  }
  return 0;
}

/*
** Create the tree of directories in which zFilename belongs, if that sequence
(old version, lines 621-641)
** Returns zero upon success.
*/
int file_rmdir(const char *zName){
  int rc = file_wd_isdir(zName);
  if( rc==2 ) return 1; /* cannot remove normal file */
  if( rc==1 ){
#if defined(_WIN32)
    wchar_t *zMbcs = fossil_utf8_to_filename(zName);
    rc = _wrmdir(zMbcs);
#else
    char *zMbcs = fossil_utf8_to_filename(zName);
    rc = rmdir(zName);
#endif
    fossil_filename_free(zMbcs);
    return rc;
  }
  return 0;
}

/*
** Return true if the filename given is a valid filename for






(new version, lines 621-641)
** Returns zero upon success.
*/
int file_rmdir(const char *zName){
  int rc = file_wd_isdir(zName);
  if( rc==2 ) return 1; /* cannot remove normal file */
  if( rc==1 ){
#if defined(_WIN32)
    wchar_t *zMbcs = fossil_utf8_to_path(zName, 1);
    rc = _wrmdir(zMbcs);
#else
    char *zMbcs = fossil_utf8_to_path(zName, 1);
    rc = rmdir(zName);
#endif
    fossil_path_free(zMbcs);
    return rc;
  }
  return 0;
}

/*
** Return true if the filename given is a valid filename for
(old version, lines 1262-1276)
  const char *zDir = ".";
  int cnt = 0;

#if defined(_WIN32)
  wchar_t zTmpPath[MAX_PATH];

  if( GetTempPathW(MAX_PATH, zTmpPath) ){
    azDirs[0] = fossil_filename_to_utf8(zTmpPath);
  }

  azDirs[1] = fossil_getenv("TEMP");
  azDirs[2] = fossil_getenv("TMP");
#endif








(new version, lines 1262-1276)
  const char *zDir = ".";
  int cnt = 0;

#if defined(_WIN32)
  wchar_t zTmpPath[MAX_PATH];

  if( GetTempPathW(MAX_PATH, zTmpPath) ){
    azDirs[0] = fossil_path_to_utf8(zTmpPath);
  }

  azDirs[1] = fossil_getenv("TEMP");
  azDirs[2] = fossil_getenv("TMP");
#endif


(old version, lines 1296-1312)
    for(i=0; i<15; i++, j++){
      zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ];
    }
    zBuf[j] = 0;
  }while( file_size(zBuf)>=0 );

#if defined(_WIN32)
  fossil_filename_free((char *)azDirs[0]);
  fossil_filename_free((char *)azDirs[1]);
  fossil_filename_free((char *)azDirs[2]);
#endif
}


/*
** Return true if a file named zName exists and has identical content
** to the blob pContent.  If zName does not exist or if the content is






(new version, lines 1296-1312)
    for(i=0; i<15; i++, j++){
      zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ];
    }
    zBuf[j] = 0;
  }while( file_size(zBuf)>=0 );

#if defined(_WIN32)
  fossil_path_free((char *)azDirs[0]);
  fossil_path_free((char *)azDirs[1]);
  fossil_path_free((char *)azDirs[2]);
#endif
}
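
The loop above maps each pseudo-random byte into the zChars[] alphabet with a modulo, so the generated suffix only ever contains filename-safe characters. A small self-contained sketch of the same mapping; the alphabet here is an assumption for illustration, since the real zChars[] is defined outside this hunk:

#include <stdio.h>
#include <stdlib.h>

int main(void){
  /* hypothetical alphabet; sizeof(zChars)-1 drops the trailing NUL,
  ** exactly as in the loop above */
  static const char zChars[] =
      "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
  char zSuffix[16];
  int i;
  for(i=0; i<15; i++){
    unsigned char r = (unsigned char)(rand() & 0xff);
    zSuffix[i] = zChars[ r % (sizeof(zChars)-1) ];
  }
  zSuffix[15] = 0;
  printf("temp suffix: %s\n", zSuffix);
  return 0;
}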


/*
** Return true if a file named zName exists and has identical content
** to the blob pContent.  If zName does not exist or if the content is
(old version, lines 1328-1352)
  rc = blob_compare(&onDisk, pContent);
  blob_reset(&onDisk);
  return rc==0;
}

/*
** Return the value of an environment variable as UTF8.
** Use fossil_filename_free() to release resources.
*/
char *fossil_getenv(const char *zName){
#ifdef _WIN32
  wchar_t *uName = fossil_utf8_to_unicode(zName);
  void *zValue = _wgetenv(uName);
  fossil_unicode_free(uName);
#else
  char *zValue = getenv(zName);
#endif
  if( zValue ) zValue = fossil_filename_to_utf8(zValue);
  return zValue;
}

/*
** Sets the value of an environment variable as UTF8.
*/
int fossil_setenv(const char *zName, const char *zValue){






(new version, lines 1328-1352)
  rc = blob_compare(&onDisk, pContent);
  blob_reset(&onDisk);
  return rc==0;
}

/*
** Return the value of an environment variable as UTF8.
** Use fossil_path_free() to release resources.
*/
char *fossil_getenv(const char *zName){
#ifdef _WIN32
  wchar_t *uName = fossil_utf8_to_unicode(zName);
  void *zValue = _wgetenv(uName);
  fossil_unicode_free(uName);
#else
  char *zValue = getenv(zName);
#endif
  if( zValue ) zValue = fossil_path_to_utf8(zValue);
  return zValue;
}
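
Per the revised comment, callers now release the result with fossil_path_free() instead of fossil_filename_free(). A short usage sketch, assuming Fossil's headers; the environment variable name is just an example:

/* Sketch only: read an environment variable as UTF-8 and release it. */
static void print_temp_dir(void){
  char *zTmp = fossil_getenv("TEMP");
  if( zTmp ){
    fossil_print("TEMP is %s\n", zTmp);
    fossil_path_free(zTmp);
  }
}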

/*
** Sets the value of an environment variable as UTF8.
*/
int fossil_setenv(const char *zName, const char *zValue){
(old version, lines 1367-1382)
/*
** Like fopen() but always takes a UTF8 argument.
*/
FILE *fossil_fopen(const char *zName, const char *zMode){
#ifdef _WIN32
  wchar_t *uMode = fossil_utf8_to_unicode(zMode);
  wchar_t *uName = fossil_utf8_to_filename(zName);
  FILE *f = _wfopen(uName, uMode);
  fossil_filename_free(uName);
  fossil_unicode_free(uMode);
#else
  FILE *f = fopen(zName, zMode);
#endif
  return f;
}






(new version, lines 1367-1382)
/*
** Like fopen() but always takes a UTF8 argument.
*/
FILE *fossil_fopen(const char *zName, const char *zMode){
#ifdef _WIN32
  wchar_t *uMode = fossil_utf8_to_unicode(zMode);
  wchar_t *uName = fossil_utf8_to_path(zName, 0);
  FILE *f = _wfopen(uName, uMode);
  fossil_path_free(uName);
  fossil_unicode_free(uMode);
#else
  FILE *f = fopen(zName, zMode);
#endif
  return f;
}
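
A brief usage sketch of the wrapper above, assuming Fossil's headers: because fossil_fopen() takes UTF-8 on every platform, callers need no _WIN32 special case of their own. The file name handling below is illustrative only.

#include <stdio.h>

/* Sketch only: print the first line of a file whose name is UTF-8. */
static int print_first_line(const char *zUtf8Name){
  char zLine[200];
  FILE *f = fossil_fopen(zUtf8Name, "rb");
  if( f==0 ) return 1;
  if( fgets(zLine, sizeof(zLine), f) ) fossil_print("%s", zLine);
  fclose(f);
  return 0;
}
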
Changes to src/finfo.c.
(old version, lines 180-206)
    rid = db_int(0, "SELECT rid FROM vfile WHERE pathname=%B %s",
                 &fname, filename_collation());
    if( rid==0 ){
      fossil_fatal("no history for file: %b", &fname);
    }
    zFilename = blob_str(&fname);
    db_prepare(&q,
        "SELECT DISTINCT b.uuid, ci.uuid, date(event.mtime%s),"
        "       coalesce(event.ecomment, event.comment),"
        "       coalesce(event.euser, event.user),"
        "       (SELECT value FROM tagxref WHERE tagid=%d AND tagtype>0"
                                " AND tagxref.rid=mlink.mid)" /* Tags */
        "  FROM mlink, blob b, event, blob ci, filename"
        " WHERE filename.name=%Q %s"
        "   AND mlink.fnid=filename.fnid"
        "   AND b.rid=mlink.fid"
        "   AND event.objid=mlink.mid"
        "   AND event.objid=ci.rid"
        " ORDER BY event.mtime DESC LIMIT %d OFFSET %d",
        timeline_utc(), TAG_BRANCH, zFilename, filename_collation(),
        iLimit, iOffset
    );
    blob_zero(&line);
    if( iBrief ){
      fossil_print("History of %s\n", blob_str(&fname));
    }
    while( db_step(&q)==SQLITE_ROW ){






(new version, lines 180-206)
    rid = db_int(0, "SELECT rid FROM vfile WHERE pathname=%B %s",
                 &fname, filename_collation());
    if( rid==0 ){
      fossil_fatal("no history for file: %b", &fname);
    }
    zFilename = blob_str(&fname);
    db_prepare(&q,
        "SELECT DISTINCT b.uuid, ci.uuid, date(event.mtime,toLocal()),"
        "       coalesce(event.ecomment, event.comment),"
        "       coalesce(event.euser, event.user),"
        "       (SELECT value FROM tagxref WHERE tagid=%d AND tagtype>0"
                                " AND tagxref.rid=mlink.mid)" /* Tags */
        "  FROM mlink, blob b, event, blob ci, filename"
        " WHERE filename.name=%Q %s"
        "   AND mlink.fnid=filename.fnid"
        "   AND b.rid=mlink.fid"
        "   AND event.objid=mlink.mid"
        "   AND event.objid=ci.rid"
        " ORDER BY event.mtime DESC LIMIT %d OFFSET %d",
        TAG_BRANCH, zFilename, filename_collation(),
        iLimit, iOffset
    );
    blob_zero(&line);
    if( iBrief ){
      fossil_print("History of %s\n", blob_str(&fname));
    }
    while( db_step(&q)==SQLITE_ROW ){
(old version, lines 297-446)
  const char *zFilename;
  char zPrevDate[20];
  const char *zA;
  const char *zB;
  int n;
  int baseCheckin;
  int fnid;
  Bag ancestor;
  Blob title;
  Blob sql;
  HQuery url;
  GraphContext *pGraph;
  int brBg = P("brbg")!=0;
  int uBg = P("ubg")!=0;
  int fDebug = atoi(PD("debug","0"));
  int fShowId = P("showid")!=0;


  login_check_credentials();
  if( !g.perm.Read ){ login_needed(g.anon.Read); return; }
  style_header("File History");
  login_anonymous_available();
  url_initialize(&url, "finfo");
  if( brBg ) url_add_parameter(&url, "brbg", 0);
  if( uBg ) url_add_parameter(&url, "ubg", 0);
  baseCheckin = name_to_rid_www("ci");
  zPrevDate[0] = 0;
  zFilename = PD("name","");
  fnid = db_int(0, "SELECT fnid FROM filename WHERE name=%Q", zFilename);
  if( fnid==0 ){
    @ No such file: %h(zFilename)
    style_footer();
    return;
  }



  if( baseCheckin ){
    int baseFid = db_int(0,
      "SELECT fid FROM mlink WHERE fnid=%d AND mid=%d",
      fnid, baseCheckin
    );
    bag_init(&ancestor);
    if( baseFid ) bag_insert(&ancestor, baseFid);
  }
  url_add_parameter(&url, "name", zFilename);
  blob_zero(&sql);
  blob_append_sql(&sql,
    "SELECT"
    " datetime(min(event.mtime)%s),"                 /* Date of change */
    " coalesce(event.ecomment, event.comment),"      /* Check-in comment */
    " coalesce(event.euser, event.user),"            /* User who made chng */
    " mlink.pid,"                                    /* Parent file rid */
    " mlink.fid,"                                    /* File rid */
    " (SELECT uuid FROM blob WHERE rid=mlink.pid),"  /* Parent file uuid */
    " (SELECT uuid FROM blob WHERE rid=mlink.fid),"  /* Current file uuid */
    " (SELECT uuid FROM blob WHERE rid=mlink.mid),"  /* Check-in uuid */
    " event.bgcolor,"                                /* Background color */
    " (SELECT value FROM tagxref WHERE tagid=%d AND tagtype>0"
                                " AND tagxref.rid=mlink.mid)," /* Branchname */
    " mlink.mid,"                                    /* check-in ID */
    " mlink.pfnid"                                   /* Previous filename */
    "  FROM mlink, event"
    " WHERE mlink.fnid=%d"
    "   AND event.objid=mlink.mid",
    timeline_utc(), TAG_BRANCH, fnid
  );
  if( (zA = P("a"))!=0 ){
    blob_append_sql(&sql, " AND event.mtime>=julianday('%q')", zA);
    url_add_parameter(&url, "a", zA);
  }
  if( (zB = P("b"))!=0 ){
    blob_append_sql(&sql, " AND event.mtime<=julianday('%q')", zB);
    url_add_parameter(&url, "b", zB);
  }






  /* We only want each version of a file to appear on the graph once,
  ** at its earliest appearance.  All the other times that it gets merged
  ** into this or that branch can be ignored.  An exception is for when
  ** files are deleted (when they have mlink.fid==0).  If the same file
  ** is deleted in multiple places, we want to show each deletion, so
  ** use a "fake fid" which is derived from the parent-fid for grouping.
  ** The same fake-fid must be used on the graph.
  */
  blob_append_sql(&sql,
    " GROUP BY"
    "   CASE WHEN mlink.fid>0 THEN mlink.fid ELSE mlink.pid+1000000000 END"
    " ORDER BY event.mtime DESC /*sort*/"
  );


  if( (n = atoi(PD("n","0")))>0 ){
    blob_append_sql(&sql, " LIMIT %d", n);
    url_add_parameter(&url, "n", P("n"));
  }
  db_prepare(&q, "%s", blob_sql_text(&sql));
  if( P("showsql")!=0 ){
    @ <p>SQL: %h(blob_str(&sql))</p>
  }
  blob_reset(&sql);
  blob_zero(&title);
  if( baseCheckin ){
    char *zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", baseCheckin);
    char *zLink = 	href("%R/info/%!S", zUuid);



    blob_appendf(&title, "Ancestors of file ");


    hyperlinked_path(zFilename, &title, zUuid, "tree", "");
    if( fShowId ) blob_appendf(&title, " (%d)", fnid);
    blob_appendf(&title, " from check-in %z%S</a>", zLink, zUuid);
    if( fShowId ) blob_appendf(&title, " (%d)", baseCheckin);
    fossil_free(zUuid);
  }else{
    blob_appendf(&title, "History of files named ");
    hyperlinked_path(zFilename, &title, 0, "tree", "");
    if( fShowId ) blob_appendf(&title, " (%d)", fnid);
  }
  @ <h2>%b(&title)</h2>
  blob_reset(&title);
  pGraph = graph_init();
  @ <table id="timelineTable" class="timelineTable">














  while( db_step(&q)==SQLITE_ROW ){
    const char *zDate = db_column_text(&q, 0);
    const char *zCom = db_column_text(&q, 1);
    const char *zUser = db_column_text(&q, 2);
    int fpid = db_column_int(&q, 3);
    int frid = db_column_int(&q, 4);
    const char *zPUuid = db_column_text(&q, 5);
    const char *zUuid = db_column_text(&q, 6);
    const char *zCkin = db_column_text(&q,7);
    const char *zBgClr = db_column_text(&q, 8);
    const char *zBr = db_column_text(&q, 9);
    int fmid = db_column_int(&q, 10);
    int pfnid = db_column_int(&q, 11);
    int gidx;
    char zTime[10];
    int nParent = 0;
    int aParent[GR_MAX_RAIL];
    static Stmt qparent;

    if( baseCheckin && frid && !bag_find(&ancestor, frid) ) continue;
    db_static_prepare(&qparent,
      "SELECT DISTINCT pid FROM mlink"
      " WHERE fid=:fid AND mid=:mid AND pid>0 AND fnid=:fnid"
      " ORDER BY isaux /*sort*/"
    );
    db_bind_int(&qparent, ":fid", frid);
    db_bind_int(&qparent, ":mid", fmid);
    db_bind_int(&qparent, ":fnid", fnid);
    while( db_step(&qparent)==SQLITE_ROW && nParent<ArraySize(aParent) ){
      aParent[nParent] = db_column_int(&qparent, 0);
      if( baseCheckin ) bag_insert(&ancestor, aParent[nParent]);
      nParent++;
    }
    db_reset(&qparent);
    if( zBr==0 ) zBr = "trunk";
    if( uBg ){
      zBgClr = hash_color(zUser);
    }else if( brBg || zBgClr==0 || zBgClr[0]==0 ){

(new version, lines 297-462)
  const char *zFilename;
  char zPrevDate[20];
  const char *zA;
  const char *zB;
  int n;
  int baseCheckin;
  int fnid;

  Blob title;
  Blob sql;
  HQuery url;
  GraphContext *pGraph;
  int brBg = P("brbg")!=0;
  int uBg = P("ubg")!=0;
  int fDebug = atoi(PD("debug","0"));
  int fShowId = P("showid")!=0;
  Stmt qparent;

  login_check_credentials();
  if( !g.perm.Read ){ login_needed(g.anon.Read); return; }
  style_header("File History");
  login_anonymous_available();
  url_initialize(&url, "finfo");
  if( brBg ) url_add_parameter(&url, "brbg", 0);
  if( uBg ) url_add_parameter(&url, "ubg", 0);
  baseCheckin = name_to_rid_www("ci");
  zPrevDate[0] = 0;
  zFilename = PD("name","");
  fnid = db_int(0, "SELECT fnid FROM filename WHERE name=%Q", zFilename);
  if( fnid==0 ){
    @ No such file: %h(zFilename)
    style_footer();
    return;
  }
  if( g.perm.Admin ){
    style_submenu_element("MLink Table", "mtab", "%R/mlink?name=%t", zFilename);
  }
  if( baseCheckin ){


    compute_direct_ancestors(baseCheckin);



  }
  url_add_parameter(&url, "name", zFilename);
  blob_zero(&sql);
  blob_append_sql(&sql,
    "SELECT"
    " datetime(min(event.mtime),toLocal()),"         /* Date of change */
    " coalesce(event.ecomment, event.comment),"      /* Check-in comment */
    " coalesce(event.euser, event.user),"            /* User who made chng */
    " mlink.pid,"                                    /* Parent file rid */
    " mlink.fid,"                                    /* File rid */
    " (SELECT uuid FROM blob WHERE rid=mlink.pid),"  /* Parent file uuid */
    " (SELECT uuid FROM blob WHERE rid=mlink.fid),"  /* Current file uuid */
    " (SELECT uuid FROM blob WHERE rid=mlink.mid),"  /* Check-in uuid */
    " event.bgcolor,"                                /* Background color */
    " (SELECT value FROM tagxref WHERE tagid=%d AND tagtype>0"
                                " AND tagxref.rid=mlink.mid)," /* Branchname */
    " mlink.mid,"                                    /* check-in ID */
    " mlink.pfnid"                                   /* Previous filename */
    "  FROM mlink, event"
    " WHERE mlink.fnid=%d"
    "   AND event.objid=mlink.mid",
    TAG_BRANCH, fnid
  );
  if( (zA = P("a"))!=0 ){
    blob_append_sql(&sql, " AND event.mtime>=julianday('%q')", zA);
    url_add_parameter(&url, "a", zA);
  }
  if( (zB = P("b"))!=0 ){
    blob_append_sql(&sql, " AND event.mtime<=julianday('%q')", zB);
    url_add_parameter(&url, "b", zB);
  }
  if( baseCheckin ){
    blob_append_sql(&sql,
      " AND mlink.mid IN (SELECT rid FROM ancestor)"
      " GROUP BY mlink.fid"
    );
  }else{
    /* We only want each version of a file to appear on the graph once,
    ** at its earliest appearance.  All the other times that it gets merged
    ** into this or that branch can be ignored.  An exception is for when
    ** files are deleted (when they have mlink.fid==0).  If the same file
    ** is deleted in multiple places, we want to show each deletion, so
    ** use a "fake fid" which is derived from the parent-fid for grouping.
    ** The same fake-fid must be used on the graph.
    */
    blob_append_sql(&sql,
      " GROUP BY"
      "   CASE WHEN mlink.fid>0 THEN mlink.fid ELSE mlink.pid+1000000000 END"

    );
  }
  blob_append_sql(&sql, " ORDER BY event.mtime DESC /*sort*/");
  if( (n = atoi(PD("n","0")))>0 ){
    blob_append_sql(&sql, " LIMIT %d", n);
    url_add_parameter(&url, "n", P("n"));
  }
  db_prepare(&q, "%s", blob_sql_text(&sql));
  if( P("showsql")!=0 ){
    @ <p>SQL: %h(blob_str(&sql))</p>
  }
  blob_reset(&sql);
  blob_zero(&title);
  if( baseCheckin ){
    char *zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", baseCheckin);
    char *zLink = 	href("%R/info/%!S", zUuid);
    if( n>0 ){
      blob_appendf(&title, "First %d ancestors of file ", n);
    }else{
      blob_appendf(&title, "Ancestors of file ");
    }
    blob_appendf(&title,"<a href='%R/finfo?name=%T'>%h</a>",
                 zFilename, zFilename);
    if( fShowId ) blob_appendf(&title, " (%d)", fnid);
    blob_appendf(&title, " from check-in %z%S</a>", zLink, zUuid);
    if( fShowId ) blob_appendf(&title, " (%d)", baseCheckin);
    fossil_free(zUuid);
  }else{
    blob_appendf(&title, "History of files named ");
    hyperlinked_path(zFilename, &title, 0, "tree", "");
    if( fShowId ) blob_appendf(&title, " (%d)", fnid);
  }
  @ <h2>%b(&title)</h2>
  blob_reset(&title);
  pGraph = graph_init();
  @ <table id="timelineTable" class="timelineTable">
  if( baseCheckin ){
    db_prepare(&qparent,
      "SELECT DISTINCT pid FROM mlink"
      " WHERE fid=:fid AND mid=:mid AND pid>0 AND fnid=:fnid"
      "   AND pmid IN (SELECT rid FROM ancestor)"
      " ORDER BY isaux /*sort*/"
    );
  }else{
    db_prepare(&qparent,
      "SELECT DISTINCT pid FROM mlink"
      " WHERE fid=:fid AND mid=:mid AND pid>0 AND fnid=:fnid"
      " ORDER BY isaux /*sort*/"
    );
  }
  while( db_step(&q)==SQLITE_ROW ){
    const char *zDate = db_column_text(&q, 0);
    const char *zCom = db_column_text(&q, 1);
    const char *zUser = db_column_text(&q, 2);
    int fpid = db_column_int(&q, 3);
    int frid = db_column_int(&q, 4);
    const char *zPUuid = db_column_text(&q, 5);
    const char *zUuid = db_column_text(&q, 6);
    const char *zCkin = db_column_text(&q,7);
    const char *zBgClr = db_column_text(&q, 8);
    const char *zBr = db_column_text(&q, 9);
    int fmid = db_column_int(&q, 10);
    int pfnid = db_column_int(&q, 11);
    int gidx;
    char zTime[10];
    int nParent = 0;
    int aParent[GR_MAX_RAIL];








    db_bind_int(&qparent, ":fid", frid);
    db_bind_int(&qparent, ":mid", fmid);
    db_bind_int(&qparent, ":fnid", fnid);
    while( db_step(&qparent)==SQLITE_ROW && nParent<ArraySize(aParent) ){
      aParent[nParent] = db_column_int(&qparent, 0);

      nParent++;
    }
    db_reset(&qparent);
    if( zBr==0 ) zBr = "trunk";
    if( uBg ){
      zBgClr = hash_color(zUser);
    }else if( brBg || zBgClr==0 || zBgClr[0]==0 ){
(old version, lines 455-469)
      @   <div class="divider timelineDate">%s(zPrevDate)</div>
      @ </td><td></td><td></td></tr>
    }
    memcpy(zTime, &zDate[11], 5);
    zTime[5] = 0;
    @ <tr><td class="timelineTime">
    @ %z(href("%R/timeline?c=%t",zDate))%s(zTime)</a></td>
    @ <td class="timelineGraph"><div id="m%d(gidx)" class="tl-nodemark"></div></td>

    if( zBgClr && zBgClr[0] ){
      @ <td class="timelineTableCell" style="background-color: %h(zBgClr);">
    }else{
      @ <td class="timelineTableCell">
    }
    if( zUuid ){
      if( nParent==0 ){






(new version, lines 471-486)
      @   <div class="divider timelineDate">%s(zPrevDate)</div>
      @ </td><td></td><td></td></tr>
    }
    memcpy(zTime, &zDate[11], 5);
    zTime[5] = 0;
    @ <tr><td class="timelineTime">
    @ %z(href("%R/timeline?c=%t",zDate))%s(zTime)</a></td>
    @ <td class="timelineGraph"><div id="m%d(gidx)" class="tl-nodemark"></div>
    @ </td>
    if( zBgClr && zBgClr[0] ){
      @ <td class="timelineTableCell" style="background-color: %h(zBgClr);">
    }else{
      @ <td class="timelineTableCell">
    }
    if( zUuid ){
      if( nParent==0 ){
(old version, lines 526-545)
      zAncLink = href("%R/finfo?name=%T&ci=%!S&debug=1",zFilename,zCkin);
      @ %z(zAncLink)[ancestry]</a>
    }
    tag_private_status(frid);
    @ </td></tr>
  }
  db_finalize(&q);

  if( pGraph ){
    graph_finish(pGraph, 1);
    if( pGraph->nErr ){
      graph_free(pGraph);
      pGraph = 0;
    }else{
      @ <tr class="timelineBottom"><td></td><td></td><td></td></tr>
    }
  }
  @ </table>
  timeline_output_graph_javascript(pGraph, 0, 1);
  style_footer();
}

(new version, lines 543-737)
      zAncLink = href("%R/finfo?name=%T&ci=%!S&debug=1",zFilename,zCkin);
      @ %z(zAncLink)[ancestry]</a>
    }
    tag_private_status(frid);
    @ </td></tr>
  }
  db_finalize(&q);
  db_finalize(&qparent);
  if( pGraph ){
    graph_finish(pGraph, 1);
    if( pGraph->nErr ){
      graph_free(pGraph);
      pGraph = 0;
    }else{
      @ <tr class="timelineBottom"><td></td><td></td><td></td></tr>
    }
  }
  @ </table>
  timeline_output_graph_javascript(pGraph, 0, 1);
  style_footer();
}

/*
** WEBPAGE: mlink
** URL: /mlink?name=FILENAME
** URL: /mlink?ci=NAME
**
** Show all MLINK table entries for a particular file, or for
** a particular check-in.  This screen is intended for use by developers
** in debugging Fossil.
*/
void mlink_page(void){
  const char *zFName = P("name");
  const char *zCI = P("ci");
  Stmt q;
  
  login_check_credentials();
  if( !g.perm.Admin ){ login_needed(g.anon.Admin); return; }
  style_header("MLINK Table");
  if( zFName==0 && zCI==0 ){
    @ <span class='generalError'>
    @ Requires either a name= or ci= query parameter
    @ </span>
  }else if( zFName ){
    int fnid = db_int(0,"SELECT fnid FROM filename WHERE name=%Q",zFName);
    if( fnid<=0 ) fossil_fatal("no such file: \"%s\"", zFName);
    db_prepare(&q,
       "SELECT"
       /* 0 */ "  datetime(event.mtime,toLocal()),"
       /* 1 */ "  (SELECT uuid FROM blob WHERE rid=mlink.mid),"
       /* 2 */ "  (SELECT uuid FROM blob WHERE rid=mlink.pmid),"
       /* 3 */ "  isaux,"
       /* 4 */ "  (SELECT uuid FROM blob WHERE rid=mlink.fid),"
       /* 5 */ "  (SELECT uuid FROM blob WHERE rid=mlink.pid),"
       /* 6 */ "  mlink.pid,"
       /* 7 */ "  mperm,"
       /* 8 */ "  (SELECT name FROM filename WHERE fnid=mlink.pfnid)"
       "  FROM mlink, event"
       " WHERE mlink.fnid=%d"
       "   AND event.objid=mlink.mid"
       " ORDER BY 1 DESC",
       fnid
    );
    @ <h1>MLINK table for file
    @ <a href='%R/finfo?name=%t(zFName)'>%h(zFName)</a></h1>
    @ <div class='brlist'>
    @ <table id='mlinktable'>
    @ <thead><tr>
    @ <th>Date</th>
    @ <th>Check-in</th>
    @ <th>Parent Check-in</th>
    @ <th>Merge?</th>
    @ <th>New</th>
    @ <th>Old</th>
    @ <th>Exe Bit?</th>
    @ <th>Prior Name</th>
    @ </tr></thead>
    @ <tbody>
    while( db_step(&q)==SQLITE_ROW ){
      const char *zDate = db_column_text(&q,0);
      const char *zCkin = db_column_text(&q,1);
      const char *zParent = db_column_text(&q,2);
      int isMerge = db_column_int(&q,3);
      const char *zFid = db_column_text(&q,4);
      const char *zPid = db_column_text(&q,5);
      int isExe = db_column_int(&q,7);
      const char *zPrior = db_column_text(&q,8);
      @ <tr>
      @ <td><a href='%R/timeline?c=%!S(zCkin)'>%s(zDate)</a></td>
      @ <td><a href='%R/info/%!S(zCkin)'>%S(zCkin)</a></td>
      if( zParent ){
        @ <td><a href='%R/info/%!S(zPid)'>%S(zParent)</a></td>
      }else{
        @ <td><i>(New)</i></td>
      }
      @ <td align='center'>%s(isMerge?"&#x2713;":"")</td>
      if( zFid ){
        @ <td><a href='%R/info/%!S(zFid)'>%S(zFid)</a></td>
      }else{
        @ <td><i>(Deleted)</i></td>
      }
      if( zPid ){
        @ <td><a href='%R/info/%!S(zPid)'>%S(zPid)</a>
      }else if( db_column_int(&q,6)<0 ){
        @ <td><i>(Added by merge)</i></td>
      }else{
        @ <td><i>(New)</i></td>
      }
      @ <td align='center'>%s(isExe?"&#x2713;":"")</td>
      if( zPrior ){
        @ <td><a href='%R/finfo?name=%t(zPrior)'>%h(zPrior)</a></td>
      }else{
        @ <td></td>
      }
      @ </tr>
    }
    db_finalize(&q);
    @ </tbody>
    @ </table>
    @ </div>
    output_table_sorting_javascript("mlinktable","tttxtttt",1);
  }else{
    int mid = name_to_rid_www("ci");
    db_prepare(&q,
       "SELECT"
       /* 0 */ "  (SELECT name FROM filename WHERE fnid=mlink.fnid),"
       /* 1 */ "  (SELECT uuid FROM blob WHERE rid=mlink.fid),"
       /* 2 */ "  pid,"
       /* 3 */ "  (SELECT uuid FROM blob WHERE rid=mlink.pid),"
       /* 4 */ "  (SELECT name FROM filename WHERE fnid=mlink.pfnid),"
       /* 5 */ "  (SELECT uuid FROM blob WHERE rid=mlink.pmid),"
       /* 6 */ "  mperm,"
       /* 7 */ "  isaux"
       "  FROM mlink WHERE mid=%d ORDER BY 1",
       mid
    );
    @ <h1>MLINK table for check-in %h(zCI)</h1>
    render_checkin_context(mid, 1);
    @ <hr>
    @ <div class='brlist'>
    @ <table id='mlinktable'>
    @ <thead><tr>
    @ <th>File</th>
    @ <th>From</th>
    @ <th>Merge?</th>
    @ <th>New</th>
    @ <th>Old</th>
    @ <th>Exe Bit?</th>
    @ <th>Prior Name</th>
    @ </tr></thead>
    @ <tbody>
    while( db_step(&q)==SQLITE_ROW ){
      const char *zName = db_column_text(&q,0);
      const char *zFid = db_column_text(&q,1);
      const char *zPid = db_column_text(&q,3);
      const char *zPrior = db_column_text(&q,4);
      const char *zParent = db_column_text(&q,5);
      int isExec = db_column_int(&q,6);
      int isAux = db_column_int(&q,7);
      @ <tr>
      @ <td><a href='%R/finfo?name=%t(zName)'>%h(zName)</a></td>
      if( zParent ){
        @ <td><a href='%R/info/%!S(zPid)'>%S(zParent)</a></td>
      }else{
        @ <td><i>(New)</i></td>
      }
      @ <td align='center'>%s(isAux?"&#x2713;":"")</td>
      if( zFid ){
        @ <td><a href='%R/info/%!S(zFid)'>%S(zFid)</a></td>
      }else{
        @ <td><i>(Deleted)</i></td>
      }
      if( zPid ){
        @ <td><a href='%R/info/%!S(zPid)'>%S(zPid)</a>
      }else if( db_column_int(&q,2)<0 ){
        @ <td><i>(Added by merge)</i></td>
      }else{
        @ <td><i>(New)</i></td>
      }
      @ <td align='center'>%s(isExec?"&#x2713;":"")</td>
      if( zPrior ){
        @ <td><a href='%R/finfo?name=%t(zPrior)'>%h(zPrior)</a></td>
      }else{
        @ <td></td>
      }
      @ </tr>
    }
    db_finalize(&q);
    @ </tbody>
    @ </table>
    @ </div>
    output_table_sorting_javascript("mlinktable","ttxtttt",1);
  }
  style_footer();
}
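
The queries above, and several finfo/info hunks in this check-in, replace the C-side timeline_utc() format argument with a toLocal() function evaluated inside SQLite, e.g. datetime(event.mtime,toLocal()). A minimal sketch of how an application-defined SQL function of that shape can be registered; the body below, which always returns the 'localtime' modifier, is an illustration rather than Fossil's actual implementation (the real toLocal() presumably consults the user's timezone preference):

#include "sqlite3.h"

/* Illustrative only: a zero-argument SQL function usable as a
** datetime() modifier, so queries may write datetime(mtime, toLocal()). */
static void toLocalFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc; (void)argv;
  sqlite3_result_text(ctx, "localtime", -1, SQLITE_STATIC);
}

static int register_tolocal(sqlite3 *db){
  return sqlite3_create_function(db, "toLocal", 0, SQLITE_UTF8, 0,
                                 toLocalFunc, 0, 0);
}
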
Changes to src/http_transport.c.
(old version, lines 76-102)
  }
}

/*
** Default SSH command
*/
#ifdef _WIN32
static char zDefaultSshCmd[] = "plink -ssh -T";
#else
static char zDefaultSshCmd[] = "ssh -e none -T";
#endif

/*
** SSH initialization of the transport layer
*/
int transport_ssh_open(UrlData *pUrlData){
  /* For SSH we need to create and run SSH fossil http
  ** to talk to the remote machine.
  */
  const char *zSsh;  /* The base SSH command */
  Blob zCmd;         /* The SSH command */
  char *zHost;       /* The host name to contact */
  int n;             /* Size of prefix string */

  socket_ssh_resolve_addr(pUrlData);
  zSsh = db_get("ssh-command", zDefaultSshCmd);
  blob_init(&zCmd, zSsh, -1);






(new version, lines 76-102)
  }
}

/*
** Default SSH command
*/
#ifdef _WIN32
static const char zDefaultSshCmd[] = "plink -ssh -T";
#else
static const char zDefaultSshCmd[] = "ssh -e none -T";
#endif

/*
** SSH initialization of the transport layer
*/
int transport_ssh_open(UrlData *pUrlData){
  /* For SSH we need to create and run SSH fossil http
  ** to talk to the remote machine.
  */
  char *zSsh;        /* The base SSH command */
  Blob zCmd;         /* The SSH command */
  char *zHost;       /* The host name to contact */
  int n;             /* Size of prefix string */

  socket_ssh_resolve_addr(pUrlData);
  zSsh = db_get("ssh-command", zDefaultSshCmd);
  blob_init(&zCmd, zSsh, -1);
Changes to src/import.c.
(old version, lines 126-159)
** Insert an artifact into the BLOB table if it isn't there already.
** If zMark is not zero, create a cross-reference from that mark back
** to the newly inserted artifact.
**
** If saveUuid is true, then pContent is a commit record.  Record its
** UUID in gg.zPrevCheckin.
*/
static int fast_insert_content(Blob *pContent, const char *zMark, int saveUuid){





  Blob hash;
  Blob cmpr;
  int rid;

  sha1sum_blob(pContent, &hash);
  rid = db_int(0, "SELECT rid FROM blob WHERE uuid=%B", &hash);
  if( rid==0 ){
    static Stmt ins;
    db_static_prepare(&ins,
        "INSERT INTO blob(uuid, size, content) VALUES(:uuid, :size, :content)"
    );
    db_bind_text(&ins, ":uuid", blob_str(&hash));
    db_bind_int(&ins, ":size", gg.nData);
    blob_compress(pContent, &cmpr);
    db_bind_blob(&ins, ":content", &cmpr);
    db_step(&ins);
    db_reset(&ins);
    blob_reset(&cmpr);
    rid = db_last_insert_rowid();



  }
  if( zMark ){
    db_multi_exec(
        "INSERT OR IGNORE INTO xmark(tname, trid, tuuid)"
        "VALUES(%Q,%d,%B)",
        zMark, rid, &hash
    );






(new version, lines 126-167)
** Insert an artifact into the BLOB table if it isn't there already.
** If zMark is not zero, create a cross-reference from that mark back
** to the newly inserted artifact.
**
** If saveUuid is true, then pContent is a commit record.  Record its
** UUID in gg.zPrevCheckin.
*/
static int fast_insert_content(
  Blob *pContent,          /* Content to insert */
  const char *zMark,       /* Label using this mark, if not NULL */
  int saveUuid,            /* Save SHA1 hash in gg.zPrevCheckin */
  int doParse              /* Invoke manifest_crosslink() */
){
  Blob hash;
  Blob cmpr;
  int rid;

  sha1sum_blob(pContent, &hash);
  rid = db_int(0, "SELECT rid FROM blob WHERE uuid=%B", &hash);
  if( rid==0 ){
    static Stmt ins;
    db_static_prepare(&ins,
        "INSERT INTO blob(uuid, size, content) VALUES(:uuid, :size, :content)"
    );
    db_bind_text(&ins, ":uuid", blob_str(&hash));
    db_bind_int(&ins, ":size", gg.nData);
    blob_compress(pContent, &cmpr);
    db_bind_blob(&ins, ":content", &cmpr);
    db_step(&ins);
    db_reset(&ins);
    blob_reset(&cmpr);
    rid = db_last_insert_rowid();
    if( doParse ){
      manifest_crosslink(rid, pContent, MC_NONE);
    }
  }
  if( zMark ){
    db_multi_exec(
        "INSERT OR IGNORE INTO xmark(tname, trid, tuuid)"
        "VALUES(%Q,%d,%B)",
        zMark, rid, &hash
    );
(old version, lines 174-207)
/*
** Use data accumulated in gg from a "blob" record to add a new file
** to the BLOB table.
*/
static void finish_blob(void){
  Blob content;
  blob_init(&content, gg.aData, gg.nData);
  fast_insert_content(&content, gg.zMark, 0);
  blob_reset(&content);
  import_reset(0);
}

/*
** Use data accumulated in gg from a "tag" record to add a new
** control artifact to the BLOB table.
*/
static void finish_tag(void){
  Blob record, cksum;
  if( gg.zDate && gg.zTag && gg.zFrom && gg.zUser ){
    blob_zero(&record);
    blob_appendf(&record, "D %s\n", gg.zDate);
    blob_appendf(&record, "T +%F %s\n", gg.zTag, gg.zFrom);
    blob_appendf(&record, "U %F\n", gg.zUser);
    md5sum_blob(&record, &cksum);
    blob_appendf(&record, "Z %b\n", &cksum);
    fast_insert_content(&record, 0, 0);
    blob_reset(&record);
    blob_reset(&cksum);
  }
  import_reset(0);
}

/*
** Compare two ImportFile objects for sorting






(new version, lines 182-214)
/*
** Use data accumulated in gg from a "blob" record to add a new file
** to the BLOB table.
*/
static void finish_blob(void){
  Blob content;
  blob_init(&content, gg.aData, gg.nData);
  fast_insert_content(&content, gg.zMark, 0, 0);
  blob_reset(&content);
  import_reset(0);
}

/*
** Use data accumulated in gg from a "tag" record to add a new
** control artifact to the BLOB table.
*/
static void finish_tag(void){
  Blob record, cksum;
  if( gg.zDate && gg.zTag && gg.zFrom && gg.zUser ){
    blob_zero(&record);
    blob_appendf(&record, "D %s\n", gg.zDate);
    blob_appendf(&record, "T +%F %s\n", gg.zTag, gg.zFrom);
    blob_appendf(&record, "U %F\n", gg.zUser);
    md5sum_blob(&record, &cksum);
    blob_appendf(&record, "Z %b\n", &cksum);
    fast_insert_content(&record, 0, 0, 1);

    blob_reset(&cksum);
  }
  import_reset(0);
}

/*
** Compare two ImportFile objects for sorting
(old version, lines 236-249)
  Blob record, cksum;

  import_prior_files();
  qsort(gg.aFile, gg.nFile, sizeof(gg.aFile[0]), mfile_cmp);
  blob_zero(&record);
  blob_appendf(&record, "C %F\n", gg.zComment);
  blob_appendf(&record, "D %s\n", gg.zDate);

  for(i=0; i<gg.nFile; i++){
    const char *zUuid = gg.aFile[i].zUuid;
    if( zUuid==0 ) continue;
    blob_appendf(&record, "F %F %s", gg.aFile[i].zName, zUuid);
    if( gg.aFile[i].isExe ){
      blob_append(&record, " x\n", 3);
    }else if( gg.aFile[i].isLink ){






(new version, lines 243-257)
  Blob record, cksum;

  import_prior_files();
  qsort(gg.aFile, gg.nFile, sizeof(gg.aFile[0]), mfile_cmp);
  blob_zero(&record);
  blob_appendf(&record, "C %F\n", gg.zComment);
  blob_appendf(&record, "D %s\n", gg.zDate);
  if( !g.fQuiet ) fossil_print("%.10s\r", gg.zDate);
  for(i=0; i<gg.nFile; i++){
    const char *zUuid = gg.aFile[i].zUuid;
    if( zUuid==0 ) continue;
    blob_appendf(&record, "F %F %s", gg.aFile[i].zName, zUuid);
    if( gg.aFile[i].isExe ){
      blob_append(&record, " x\n", 3);
    }else if( gg.aFile[i].isLink ){
(old version, lines 288-303)
  free(zFromBranch);
  db_multi_exec("INSERT INTO xbranch(tname, brnm) VALUES(%Q,%Q)",
                gg.zMark, gg.zBranch);
  blob_appendf(&record, "U %F\n", gg.zUser);
  md5sum_blob(&record, &cksum);
  blob_appendf(&record, "Z %b\n", &cksum);
  fast_insert_content(&record, gg.zMark, 1);
  blob_reset(&record);
  blob_reset(&cksum);

  /* The "git fast-export" command might output multiple "commit" lines
  ** that reference a tag using "refs/tags/TAGNAME".  The tag should only
  ** be applied to the last commit that is output.  The problem is we do not
  ** know at this time if the current commit is the last one to hold this
  ** tag or not.  So make an entry in the XTAG table to record this tag






|
<







296
297
298
299
300
301
302
303

304
305
306
307
308
309
310
  free(zFromBranch);
  db_multi_exec("INSERT INTO xbranch(tname, brnm) VALUES(%Q,%Q)",
                gg.zMark, gg.zBranch);
  blob_appendf(&record, "U %F\n", gg.zUser);
  md5sum_blob(&record, &cksum);
  blob_appendf(&record, "Z %b\n", &cksum);
  fast_insert_content(&record, gg.zMark, 1, 1);

  blob_reset(&cksum);

  /* The "git fast-export" command might output multiple "commit" lines
  ** that reference a tag using "refs/tags/TAGNAME".  The tag should only
  ** be applied to the last commit that is output.  The problem is we do not
  ** know at this time if the current commit is the last one to hold this
  ** tag or not.  So make an entry in the XTAG table to record this tag
(old version, lines 461-475)
static void dequote_git_filename(char *zName){
  int n, i, j;
  if( zName==0 || zName[0]!='"' ) return;
  n = (int)strlen(zName);
  if( zName[n-1]!='"' ) return;
  for(i=0, j=1; j<n-1; j++){
    char c = zName[j];

    if( c=='\\' ) c = zName[++j];












    zName[i++] = c;
  }
  zName[i] = 0;
}


/*






(new version, lines 468-495)
static void dequote_git_filename(char *zName){
  int n, i, j;
  if( zName==0 || zName[0]!='"' ) return;
  n = (int)strlen(zName);
  if( zName[n-1]!='"' ) return;
  for(i=0, j=1; j<n-1; j++){
    char c = zName[j];
    int x;
    if( c=='\\' ){
      if( j+3 <= n-1
       && zName[j+1]>='0' && zName[j+1]<='3'
       && zName[j+2]>='0' && zName[j+2]<='7'
       && zName[j+3]>='0' && zName[j+3]<='7'
       && (x = 64*(zName[j+1]-'0') + 8*(zName[j+2]-'0') + zName[j+3]-'0')!=0
      ){
        c = (unsigned char)x;
        j += 3;
      }else{
        c = zName[++j];
      }
    }
    zName[i++] = c;
  }
  zName[i] = 0;
}
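
A worked example of the new octal-escape handling, assuming the function above is in scope; the file name is hypothetical:

/* git fast-export writes non-ASCII path bytes as three-digit octal
** escapes inside a quoted name.  The new branch above decodes them
** back into raw bytes instead of copying the digits through. */
static void example_dequote(void){
  char zName[] = "\"sch\\303\\266n.txt\"";   /* as emitted by git */
  dequote_git_filename(zName);
  /* zName now holds: schön.txt  (0xC3 0xB6 is UTF-8 for "ö") */
}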


/*
(old version, lines 527-541)
    }else
    if( strncmp(zLine, "tag ", 4)==0 ){
      gg.xFinish();
      gg.xFinish = finish_tag;
      trim_newline(&zLine[4]);
      gg.zTag = fossil_strdup(&zLine[4]);
    }else
    if( strncmp(zLine, "reset ", 4)==0 ){
      gg.xFinish();
    }else
    if( strncmp(zLine, "checkpoint", 10)==0 ){
      gg.xFinish();
    }else
    if( strncmp(zLine, "feature", 7)==0 ){
      gg.xFinish();






(new version, lines 547-561)
    }else
    if( strncmp(zLine, "tag ", 4)==0 ){
      gg.xFinish();
      gg.xFinish = finish_tag;
      trim_newline(&zLine[4]);
      gg.zTag = fossil_strdup(&zLine[4]);
    }else
    if( strncmp(zLine, "reset ", 6)==0 ){
      gg.xFinish();
    }else
    if( strncmp(zLine, "checkpoint", 10)==0 ){
      gg.xFinish();
    }else
    if( strncmp(zLine, "feature", 7)==0 ){
      gg.xFinish();
(old version, lines 1417-1431)
          fossil_fatal("Missing Node-kind");
        }
        if( strncmp(zKind, "dir", 3)!=0 ){
          if( deltaFlag ){
            Blob deltaSrc;
            Blob target;
            rid = db_int(0, "SELECT rid FROM blob WHERE uuid=("
                            " SELECT uuid FROM xfiles"
                            "  WHERE tpath=%Q AND tbranch=%d"
                            ")", zFile, branchId);
            content_get(rid, &deltaSrc);
            svn_apply_svndiff(&rec.content, &deltaSrc, &target);
            rid = content_put(&target);
          }else{
            rid = content_put(&rec.content);






(new version, lines 1437-1451)
          fossil_fatal("Missing Node-kind");
        }
        if( strncmp(zKind, "dir", 3)!=0 ){
          if( deltaFlag ){
            Blob deltaSrc;
            Blob target;
            rid = db_int(0, "SELECT rid FROM blob WHERE uuid=("
                            " SELECT tuuid FROM xfiles"
                            "  WHERE tpath=%Q AND tbranch=%d"
                            ")", zFile, branchId);
            content_get(rid, &deltaSrc);
            svn_apply_svndiff(&rec.content, &deltaSrc, &target);
            rid = content_put(&target);
          }else{
            rid = content_put(&rec.content);
(old version, lines 1488-1514)
**                  --tags FOLDER      Name of tags folder
**                  --base PATH        Path to project root in repository
**                  --flat             The whole dump is a single branch
**
** Common Options:
**   -i|--incremental   allow importing into an existing repository
**   -f|--force         overwrite repository if already exist



**
** The --incremental option allows an existing repository to be extended
** with new content.
**
**
** See also: export
*/
void import_cmd(void){
  char *zPassword;
  FILE *pIn;
  Stmt q;
  int forceFlag = find_option("force", "f", 0)!=0;
  int svnFlag = find_option("svn", 0, 0)!=0;



  /* Options common to all input formats */
  int incrFlag = find_option("incremental", "i", 0)!=0;

  /* Options for --svn only */
  const char *zBase="";
  int flatFlag=0;






(new version, lines 1508-1538)
**                  --tags FOLDER      Name of tags folder
**                  --base PATH        Path to project root in repository
**                  --flat             The whole dump is a single branch
**
** Common Options:
**   -i|--incremental   allow importing into an existing repository
**   -f|--force         overwrite repository if already exist
**   -q|--quiet         omit progress output
**   --no-rebuild       skip the "rebuilding metadata" step
**   --no-vacuum        skip the final VACUUM of the database file
**
** The --incremental option allows an existing repository to be extended
** with new content.

**
** See also: export
*/
void import_cmd(void){
  char *zPassword;
  FILE *pIn;
  Stmt q;
  int forceFlag = find_option("force", "f", 0)!=0;
  int svnFlag = find_option("svn", 0, 0)!=0;
  int omitRebuild = find_option("no-rebuild",0,0)!=0;
  int omitVacuum = find_option("no-vacuum",0,0)!=0;

  /* Options common to all input formats */
  int incrFlag = find_option("incremental", "i", 0)!=0;

  /* Options for --svn only */
  const char *zBase="";
  int flatFlag=0;
(old version, lines 1538-1552)
    fossil_binary_mode(pIn);
  }
  if( !incrFlag ){
    if( forceFlag ) file_delete(g.argv[2]);
    db_create_repository(g.argv[2]);
  }
  db_open_repository(g.argv[2]);
  db_open_config(0);

  db_begin_transaction();
  if( !incrFlag ) db_initial_setup(0, 0, 0);

  if( svnFlag ){
    db_multi_exec(
       "CREATE TEMP TABLE xrevisions("






(new version, lines 1562-1576)
    fossil_binary_mode(pIn);
  }
  if( !incrFlag ){
    if( forceFlag ) file_delete(g.argv[2]);
    db_create_repository(g.argv[2]);
  }
  db_open_repository(g.argv[2]);
  db_open_config(0, 0);

  db_begin_transaction();
  if( !incrFlag ) db_initial_setup(0, 0, 0);

  if( svnFlag ){
    db_multi_exec(
       "CREATE TEMP TABLE xrevisions("
(old version, lines 1623-1657)
    */
    db_multi_exec(
       "CREATE TEMP TABLE xmark(tname TEXT UNIQUE, trid INT, tuuid TEXT);"
       "CREATE TEMP TABLE xbranch(tname TEXT UNIQUE, brnm TEXT);"
       "CREATE TEMP TABLE xtag(tname TEXT UNIQUE, tcontent TEXT);"
    );


    git_fast_import(pIn);
    db_prepare(&q, "SELECT tcontent FROM xtag");
    while( db_step(&q)==SQLITE_ROW ){
      Blob record;
      db_ephemeral_blob(&q, 0, &record);
      fast_insert_content(&record, 0, 0);
      import_reset(0);
    }
    db_finalize(&q);

  }

  verify_cancel();
  db_end_transaction(0);




  db_begin_transaction();
  fossil_print("Rebuilding repository meta-data...\n");
  rebuild_db(0, 1, !incrFlag);
  verify_cancel();
  db_end_transaction(0);


  fossil_print("Vacuuming..."); fflush(stdout);
  db_multi_exec("VACUUM");

  fossil_print(" ok\n");
  if( !incrFlag ){
    fossil_print("project-id: %s\n", db_get("project-code", 0));
    fossil_print("server-id:  %s\n", db_get("server-code", 0));
    zPassword = db_text(0, "SELECT pw FROM user WHERE login=%Q", g.zLogin);
    fossil_print("admin-user: %s (password is \"%s\")\n", g.zLogin, zPassword);
  }
}






(new version, lines 1647-1690)
    */
    db_multi_exec(
       "CREATE TEMP TABLE xmark(tname TEXT UNIQUE, trid INT, tuuid TEXT);"
       "CREATE TEMP TABLE xbranch(tname TEXT UNIQUE, brnm TEXT);"
       "CREATE TEMP TABLE xtag(tname TEXT UNIQUE, tcontent TEXT);"
    );

    manifest_crosslink_begin();
    git_fast_import(pIn);
    db_prepare(&q, "SELECT tcontent FROM xtag");
    while( db_step(&q)==SQLITE_ROW ){
      Blob record;
      db_ephemeral_blob(&q, 0, &record);
      fast_insert_content(&record, 0, 0, 1);
      import_reset(0);
    }
    db_finalize(&q);
    manifest_crosslink_end(MC_NONE);
  }

  verify_cancel();
  db_end_transaction(0);
  fossil_print("                               \r");
  if( omitRebuild ){
    omitVacuum = 1;
  }else{
    db_begin_transaction();
    fossil_print("Rebuilding repository meta-data...\n");
    rebuild_db(0, 1, !incrFlag);
    verify_cancel();
    db_end_transaction(0);
  }
  if( !omitVacuum ){
    fossil_print("Vacuuming..."); fflush(stdout);
    db_multi_exec("VACUUM");
  }
  fossil_print(" ok\n");
  if( !incrFlag ){
    fossil_print("project-id: %s\n", db_get("project-code", 0));
    fossil_print("server-id:  %s\n", db_get("server-code", 0));
    zPassword = db_text(0, "SELECT pw FROM user WHERE login=%Q", g.zLogin);
    fossil_print("admin-user: %s (password is \"%s\")\n", g.zLogin, zPassword);
  }
}
Changes to src/info.c.
(old version, lines 182-196)
  i64 fsize;
  int verboseFlag = find_option("verbose","v",0)!=0;
  if( !verboseFlag ){
    verboseFlag = find_option("detail","l",0)!=0; /* deprecated */
  }

  if( g.argc==3 && (fsize = file_size(g.argv[2]))>0 && (fsize&0x1ff)==0 ){
    db_open_config(0);
    db_open_repository(g.argv[2]);
    db_record_repository_filename(g.argv[2]);
    fossil_print("project-name: %s\n", db_get("project-name", "<unnamed>"));
    fossil_print("project-code: %s\n", db_get("project-code", "<none>"));
    extraRepoInfo();
    return;
  }






(new version, lines 182-196)
  i64 fsize;
  int verboseFlag = find_option("verbose","v",0)!=0;
  if( !verboseFlag ){
    verboseFlag = find_option("detail","l",0)!=0; /* deprecated */
  }

  if( g.argc==3 && (fsize = file_size(g.argv[2]))>0 && (fsize&0x1ff)==0 ){
    db_open_config(0, 0);
    db_open_repository(g.argv[2]);
    db_record_repository_filename(g.argv[2]);
    fossil_print("project-name: %s\n", db_get("project-name", "<unnamed>"));
    fossil_print("project-code: %s\n", db_get("project-code", "<none>"));
    extraRepoInfo();
    return;
  }
(old version, lines 211-249)
    }
    fossil_print("project-code: %s\n", db_get("project-code", ""));
    vid = g.localOpen ? db_lget_int("checkout", 0) : 0;
    if( vid ){
      show_common_info(vid, "checkout:", 1, 1);
    }
    fossil_print("check-ins:    %d\n",
                 db_int(-1, "SELECT count(*) FROM event WHERE type='ci' /*scan*/"));
  }else{
    int rid;
    rid = name_to_rid(g.argv[2]);
    if( rid==0 ){
      fossil_fatal("no such object: %s\n", g.argv[2]);
    }
    show_common_info(rid, "uuid:", 1, 1);
  }
}

/*
** Show information about all tags on a given check-in.
*/
static void showTags(int rid){
  Stmt q;
  int cnt = 0;
  db_prepare(&q,
    "SELECT tag.tagid, tagname, "
    "       (SELECT uuid FROM blob WHERE rid=tagxref.srcid AND rid!=%d),"
    "       value, datetime(tagxref.mtime%s), tagtype,"
    "       (SELECT uuid FROM blob WHERE rid=tagxref.origid AND rid!=%d)"
    "  FROM tagxref JOIN tag ON tagxref.tagid=tag.tagid"
    " WHERE tagxref.rid=%d"
    " ORDER BY tagname /*sort*/", rid, timeline_utc(), rid, rid
  );
  while( db_step(&q)==SQLITE_ROW ){
    const char *zTagname = db_column_text(&q, 1);
    const char *zSrcUuid = db_column_text(&q, 2);
    const char *zValue = db_column_text(&q, 3);
    const char *zDate = db_column_text(&q, 4);
    int tagtype = db_column_int(&q, 5);






(new version, lines 211-249)
    }
    fossil_print("project-code: %s\n", db_get("project-code", ""));
    vid = g.localOpen ? db_lget_int("checkout", 0) : 0;
    if( vid ){
      show_common_info(vid, "checkout:", 1, 1);
    }
    fossil_print("check-ins:    %d\n",
             db_int(-1, "SELECT count(*) FROM event WHERE type='ci' /*scan*/"));
  }else{
    int rid;
    rid = name_to_rid(g.argv[2]);
    if( rid==0 ){
      fossil_fatal("no such object: %s\n", g.argv[2]);
    }
    show_common_info(rid, "uuid:", 1, 1);
  }
}

/*
** Show information about all tags on a given check-in.
*/
static void showTags(int rid){
  Stmt q;
  int cnt = 0;
  db_prepare(&q,
    "SELECT tag.tagid, tagname, "
    "       (SELECT uuid FROM blob WHERE rid=tagxref.srcid AND rid!=%d),"
    "       value, datetime(tagxref.mtime,toLocal()), tagtype,"
    "       (SELECT uuid FROM blob WHERE rid=tagxref.origid AND rid!=%d)"
    "  FROM tagxref JOIN tag ON tagxref.tagid=tag.tagid"
    " WHERE tagxref.rid=%d"
    " ORDER BY tagname /*sort*/", rid, rid, rid
  );
  while( db_step(&q)==SQLITE_ROW ){
    const char *zTagname = db_column_text(&q, 1);
    const char *zSrcUuid = db_column_text(&q, 2);
    const char *zValue = db_column_text(&q, 3);
    const char *zDate = db_column_text(&q, 4);
    int tagtype = db_column_int(&q, 5);
(old version, lines 293-319)
  }
}

/*
** Show the context graph (immediate parents and children) for
** check-in rid.
*/
static void showContext(int rid){
  Blob sql;
  Stmt q;
  @ <div class="section">Context</div>
  blob_zero(&sql);
  blob_append(&sql, timeline_query_for_www(), -1);
  db_multi_exec(
     "CREATE TEMP TABLE IF NOT EXISTS ok(rid INTEGER PRIMARY KEY);"
     "INSERT INTO ok VALUES(%d);"
     "INSERT OR IGNORE INTO ok SELECT pid FROM plink WHERE cid=%d;"




     "INSERT OR IGNORE INTO ok SELECT cid FROM plink WHERE pid=%d;",
     rid, rid, rid
  );

  blob_append_sql(&sql, " AND event.objid IN ok ORDER BY mtime DESC");
  db_prepare(&q, "%s", blob_sql_text(&sql));
  www_print_timeline(&q, TIMELINE_DISJOINT|TIMELINE_GRAPH, 0, 0, rid, 0);
  db_finalize(&q);
}








(new version, lines 293-322)
  }
}

/*
** Show the context graph (immediate parents and children) for
** check-in rid.
*/
void render_checkin_context(int rid, int parentsOnly){
  Blob sql;
  Stmt q;

  blob_zero(&sql);
  blob_append(&sql, timeline_query_for_www(), -1);
  db_multi_exec(
     "CREATE TEMP TABLE IF NOT EXISTS ok(rid INTEGER PRIMARY KEY);"
     "INSERT INTO ok VALUES(%d);"
     "INSERT OR IGNORE INTO ok SELECT pid FROM plink WHERE cid=%d;",
     rid, rid
  );
  if( !parentsOnly ){
    db_multi_exec(
      "INSERT OR IGNORE INTO ok SELECT cid FROM plink WHERE pid=%d;", rid

    );
  }
  blob_append_sql(&sql, " AND event.objid IN ok ORDER BY mtime DESC");
  db_prepare(&q, "%s", blob_sql_text(&sql));
  www_print_timeline(&q, TIMELINE_DISJOINT|TIMELINE_GRAPH, 0, 0, rid, 0);
  db_finalize(&q);
}
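
showContext() has been renamed to render_checkin_context() and given a parentsOnly flag, so other pages can reuse it. A hedged sketch of a hypothetical caller that only wants the ancestors drawn (the /ci page itself calls it with parentsOnly=0 further down):

  /* Hypothetical caller: emit the section heading, then let the shared
  ** helper render just the immediate parents of the check-in. */
  @ <div class="section">Ancestors</div>
  render_checkin_context(rid, 1 /* parentsOnly */);
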


[old version, lines 544-563]

  zParent = db_text(0,
    "SELECT uuid FROM plink, blob"
    " WHERE plink.cid=%d AND blob.rid=plink.pid AND plink.isprim",
    rid
  );
  isLeaf = is_a_leaf(rid);
  db_prepare(&q1,
     "SELECT uuid, datetime(mtime%s), user, comment,"
     "       datetime(omtime%s), mtime"
     "  FROM blob, event"
     " WHERE blob.rid=%d"
     "   AND event.objid=%d",
     timeline_utc(), timeline_utc(), rid, rid
  );
  sideBySide = !is_false(PD("sbs","1"));
  if( db_step(&q1)==SQLITE_ROW ){
    const char *zUuid = db_column_text(&q1, 0);
    char *zEUser, *zEComment;
    const char *zUser;
    const char *zComment;






[new version, lines 547-566]

  zParent = db_text(0,
    "SELECT uuid FROM plink, blob"
    " WHERE plink.cid=%d AND blob.rid=plink.pid AND plink.isprim",
    rid
  );
  isLeaf = is_a_leaf(rid);
  db_prepare(&q1,
     "SELECT uuid, datetime(mtime,toLocal()), user, comment,"
     "       datetime(omtime,toLocal()), mtime"
     "  FROM blob, event"
     " WHERE blob.rid=%d"
     "   AND event.objid=%d",
     rid, rid
  );
  sideBySide = !is_false(PD("sbs","1"));
  if( db_step(&q1)==SQLITE_ROW ){
    const char *zUuid = db_column_text(&q1, 0);
    char *zEUser, *zEComment;
    const char *zUser;
    const char *zComment;

[old version, lines 596-611]

      @ <tr><th>Original&nbsp;User:</th><td>
      hyperlink_to_user(zUser,zDate,"</td></tr>");
    }else{
      @ <tr><th>User:</th><td>
      hyperlink_to_user(zUser,zDate,"</td></tr>");
    }
    if( zEComment ){
      @ <tr><th>Edited&nbsp;Comment:</th><td class="infoComment">%!W(zEComment)</td></tr>

      @ <tr><th>Original&nbsp;Comment:</th><td class="infoComment">%!W(zComment)</td></tr>

    }else{
      @ <tr><th>Comment:</th><td class="infoComment">%!W(zComment)</td></tr>
    }
    if( g.perm.Admin ){
      db_prepare(&q2,
         "SELECT rcvfrom.ipaddr, user.login, datetime(rcvfrom.mtime)"
         "  FROM blob JOIN rcvfrom USING(rcvid) LEFT JOIN user USING(uid)"






[new version, lines 599-616]

      @ <tr><th>Original&nbsp;User:</th><td>
      hyperlink_to_user(zUser,zDate,"</td></tr>");
    }else{
      @ <tr><th>User:</th><td>
      hyperlink_to_user(zUser,zDate,"</td></tr>");
    }
    if( zEComment ){
      @ <tr><th>Edited&nbsp;Comment:</th>
      @     <td class="infoComment">%!W(zEComment)</td></tr>
      @ <tr><th>Original&nbsp;Comment:</th>
      @     <td class="infoComment">%!W(zComment)</td></tr>
    }else{
      @ <tr><th>Comment:</th><td class="infoComment">%!W(zComment)</td></tr>
    }
    if( g.perm.Admin ){
      db_prepare(&q2,
         "SELECT rcvfrom.ipaddr, user.login, datetime(rcvfrom.mtime)"
         "  FROM blob JOIN rcvfrom USING(rcvid) LEFT JOIN user USING(uid)"

[old version, lines 686-700]

    @ </table>
  }else{
    style_header("Check-in Information");
    login_anonymous_available();
  }
  db_finalize(&q1);
  showTags(rid);

  showContext(rid);
  @ <div class="section">Changes</div>
  @ <div class="sectionmenu">
  verboseFlag = g.zPath[0]!='c';
  if( db_get_boolean("show-version-diffs", 0)==0 ){
    verboseFlag = !verboseFlag;
    zPage = "ci";
    zPageHide = "vinfo";






[new version, lines 691-706]

    @ </table>
  }else{
    style_header("Check-in Information");
    login_anonymous_available();
  }
  db_finalize(&q1);
  showTags(rid);
  @ <div class="section">Context</div>
  render_checkin_context(rid, 0);
  @ <div class="section">Changes</div>
  @ <div class="sectionmenu">
  verboseFlag = g.zPath[0]!='c';
  if( db_get_boolean("show-version-diffs", 0)==0 ){
    verboseFlag = !verboseFlag;
    zPage = "ci";
    zPageHide = "vinfo";

[old version, lines 724-737]

    @ %z(xhref("class='button'","%R/%s/%T?sbs=1",zPage,zName))
    @ Show&nbsp;Side-by-Side&nbsp;Diffs</a>
  }
  if( zParent ){
    @ %z(xhref("class='button'","%R/vpatch?from=%!S&to=%!S",zParent,zUuid))
    @ Patch</a>
  }



  @</div>
  if( pRe ){
    @ <p><b>Only differences that match regular expression "%h(zRe)"
    @ are shown.</b></p>
  }
  db_prepare(&q3,
    "SELECT name,"






[new version, lines 730-746]

    @ %z(xhref("class='button'","%R/%s/%T?sbs=1",zPage,zName))
    @ Show&nbsp;Side-by-Side&nbsp;Diffs</a>
  }
  if( zParent ){
    @ %z(xhref("class='button'","%R/vpatch?from=%!S&to=%!S",zParent,zUuid))
    @ Patch</a>
  }
  if( g.perm.Admin ){
    @ %z(xhref("class='button'","%R/mlink?ci=%!S",zUuid))MLink Table</a>
  }
  @</div>
  if( pRe ){
    @ <p><b>Only differences that match regular expression "%h(zRe)"
    @ are shown.</b></p>
  }
  db_prepare(&q3,
    "SELECT name,"

[old version, lines 977-990]

**   to=TAG          Right side of the comparison
**   branch=TAG      Show all changes on a particular branch
**   v=BOOLEAN       Default true.  If false, only list files that have changed
**   sbs=BOOLEAN     Side-by-side diff if true.  Unified diff if false
**   glob=STRING     only diff files matching this glob
**   dc=N            show N lines of context around each diff
**   w               ignore whitespace when computing diffs

**
**
** Show all differences between two check-ins.
*/
void vdiff_page(void){
  int ridFrom, ridTo;
  int verboseFlag = 0;






[new version, lines 986-1000]

**   to=TAG          Right side of the comparison
**   branch=TAG      Show all changes on a particular branch
**   v=BOOLEAN       Default true.  If false, only list files that have changed
**   sbs=BOOLEAN     Side-by-side diff if true.  Unified diff if false
**   glob=STRING     only diff files matching this glob
**   dc=N            show N lines of context around each diff
**   w               ignore whitespace when computing diffs
**   nohdr           omit the description at the top of the page
**
**
** Show all differences between two check-ins.
*/
void vdiff_page(void){
  int ridFrom, ridTo;
  int verboseFlag = 0;
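
The parameter list above gains a nohdr flag. As an illustration only (the tag names are made up), a request such as the following would diff two check-ins, restrict the output to C sources, and omit the description block at the top of the page:

  /vdiff?from=trunk&to=feature-x&glob=src/*.c&dc=5&nohdr
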

[old version, lines 1074-1100]

      style_submenu_element("Ignore Whitespace", "ignorews",
                            "%R/vdiff?from=%T&to=%T&sbs=%d%s%s%T&w", zFrom, zTo,
                            sideBySide, (verboseFlag && !sideBySide)?"&v":"",
                            zGlob ? "&glob=" : "", zGlob ? zGlob : "");
    }
  }
  style_header("Check-in Differences");

  @ <h2>Difference From:</h2><blockquote>
  checkin_description(ridFrom);
  @ </blockquote><h2>To:</h2><blockquote>
  checkin_description(ridTo);
  @ </blockquote>
  if( pRe ){
    @ <p><b>Only differences that match regular expression "%h(zRe)"
    @ are shown.</b></p>
  }
  if( zGlob ){
    @ <p><b>Only files matching the glob "%h(zGlob)" are shown.</b></p>
  }
  @<hr /><p>


  manifest_file_rewind(pFrom);
  pFileFrom = manifest_file_next(pFrom, 0);
  manifest_file_rewind(pTo);
  pFileTo = manifest_file_next(pTo, 0);
  while( pFileFrom || pFileTo ){
    int cmp;






[new version, lines 1084-1112]

      style_submenu_element("Ignore Whitespace", "ignorews",
                            "%R/vdiff?from=%T&to=%T&sbs=%d%s%s%T&w", zFrom, zTo,
                            sideBySide, (verboseFlag && !sideBySide)?"&v":"",
                            zGlob ? "&glob=" : "", zGlob ? zGlob : "");
    }
  }
  style_header("Check-in Differences");
  if( P("nohdr")==0 ){
    @ <h2>Difference From:</h2><blockquote>
    checkin_description(ridFrom);
    @ </blockquote><h2>To:</h2><blockquote>
    checkin_description(ridTo);
    @ </blockquote>
    if( pRe ){
      @ <p><b>Only differences that match regular expression "%h(zRe)"
      @ are shown.</b></p>
    }
    if( zGlob ){
      @ <p><b>Only files matching the glob "%h(zGlob)" are shown.</b></p>
    }
    @<hr /><p>
  }

  manifest_file_rewind(pFrom);
  pFileFrom = manifest_file_next(pFrom, 0);
  manifest_file_rewind(pTo);
  pFileTo = manifest_file_next(pTo, 0);
  while( pFileFrom || pFileTo ){
    int cmp;

[old version, lines 1329-1347]

      }else if( zType[0]=='t' ){
        @ Ticket change
        objType |= OBJTYPE_TICKET;
      }else if( zType[0]=='c' ){
        @ Manifest of check-in
        objType |= OBJTYPE_CHECKIN;
      }else if( zType[0]=='e' ){
        @ Instance of event
        objType |= OBJTYPE_EVENT;
        hyperlink_to_event_tagid(db_column_int(&q, 5));
      }else{
        @ Control file referencing
      }
      if( zType[0]!='e' ){
        hyperlink_to_uuid(zUuid);
      }
      @ - %!W(zCom) by
      hyperlink_to_user(zUser,zDate," on");
      hyperlink_to_date(zDate, ".");






[new version, lines 1341-1359]

      }else if( zType[0]=='t' ){
        @ Ticket change
        objType |= OBJTYPE_TICKET;
      }else if( zType[0]=='c' ){
        @ Manifest of check-in
        objType |= OBJTYPE_CHECKIN;
      }else if( zType[0]=='e' ){
        @ Instance of technote
        objType |= OBJTYPE_EVENT;
        hyperlink_to_event_tagid(db_column_int(&q, 5));
      }else{
        @ Tag referencing
      }
      if( zType[0]!='e' ){
        hyperlink_to_uuid(zUuid);
      }
      @ - %!W(zCom) by
      hyperlink_to_user(zUser,zDate," on");
      hyperlink_to_date(zDate, ".");

[old version, lines 1391-1406]

    cnt++;
    if( pDownloadName && blob_size(pDownloadName)==0 ){
      blob_append(pDownloadName, zFilename, -1);
    }
    tag_private_status(rid);
  }
  db_finalize(&q);





  if( cnt==0 ){
    @ Control artifact.
    if( pDownloadName && blob_size(pDownloadName)==0 ){
      blob_appendf(pDownloadName, "%S.txt", zUuid);
    }
    tag_private_status(rid);
  }
  return objType;
}






[new version, lines 1403-1423]

    cnt++;
    if( pDownloadName && blob_size(pDownloadName)==0 ){
      blob_append(pDownloadName, zFilename, -1);
    }
    tag_private_status(rid);
  }
  db_finalize(&q);
  if( db_exists("SELECT 1 FROM tagxref WHERE rid=%d AND tagid=%d",
                rid, TAG_CLUSTER) ){
    @ Cluster
    cnt++;
  }
  if( cnt==0 ){
    @ Unrecognized artifact
    if( pDownloadName && blob_size(pDownloadName)==0 ){
      blob_appendf(pDownloadName, "%S.txt", zUuid);
    }
    tag_private_status(rid);
  }
  return objType;
}

[old version, lines 1616-1630]

  rid = name_to_rid_www("name");
  login_check_credentials();
  if( !g.perm.Read ){ login_needed(g.anon.Read); return; }
  if( rid==0 ) fossil_redirect_home();
  if( g.perm.Admin ){
    const char *zUuid = db_text("", "SELECT uuid FROM blob WHERE rid=%d", rid);
    if( db_exists("SELECT 1 FROM shun WHERE uuid=%Q", zUuid) ){
      style_submenu_element("Unshun","Unshun", "%s/shun?accept=%s&sub=1#delshun",
            g.zTop, zUuid);
    }else{
      style_submenu_element("Shun","Shun", "%s/shun?shun=%s#addshun",
            g.zTop, zUuid);
    }
  }
  style_header("Hex Artifact Content");






[new version, lines 1633-1647]

  rid = name_to_rid_www("name");
  login_check_credentials();
  if( !g.perm.Read ){ login_needed(g.anon.Read); return; }
  if( rid==0 ) fossil_redirect_home();
  if( g.perm.Admin ){
    const char *zUuid = db_text("", "SELECT uuid FROM blob WHERE rid=%d", rid);
    if( db_exists("SELECT 1 FROM shun WHERE uuid=%Q", zUuid) ){
      style_submenu_element("Unshun","Unshun","%s/shun?accept=%s&sub=1#delshun",
            g.zTop, zUuid);
    }else{
      style_submenu_element("Shun","Shun", "%s/shun?shun=%s#addshun",
            g.zTop, zUuid);
    }
  }
  style_header("Hex Artifact Content");

[old version, lines 1798-1811]

** Additional query parameters:
**
**   ln              - show line numbers
**   ln=N            - highlight line number N
**   ln=M-N          - highlight lines M through N inclusive
**   ln=M-N+Y-Z      - higllight lines M through N and Y through Z (inclusive)
**   verbose         - show more detail in the description

**
** The /artifact page show the complete content of a file
** identified by SHA1HASH as preformatted text.  The
** /whatis page shows only a description of the file.
*/
void artifact_page(void){
  int rid = 0;






[new version, lines 1815-1829]

** Additional query parameters:
**
**   ln              - show line numbers
**   ln=N            - highlight line number N
**   ln=M-N          - highlight lines M through N inclusive
**   ln=M-N+Y-Z      - higllight lines M through N and Y through Z (inclusive)
**   verbose         - show more detail in the description
**   download        - redirect to the download (artifact page only)
**
** The /artifact page show the complete content of a file
** identified by SHA1HASH as preformatted text.  The
** /whatis page shows only a description of the file.
*/
void artifact_page(void){
  int rid = 0;
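
A few illustrative requests for the parameters listed above (the artifact hash is made up): ln=N highlights a single line, ln=M-N+Y-Z highlights two ranges, and the new download flag redirects straight to the raw content:

  /artifact/c1a2b3d4?ln=25
  /artifact/c1a2b3d4?ln=10-20+35-40&verbose
  /artifact/c1a2b3d4?download
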

[old version, lines 1827-1860]

  if( rid==0 ){
    rid = name_to_rid_www("name");
  }

  login_check_credentials();
  if( !g.perm.Read ){ login_needed(g.anon.Read); return; }
  if( rid==0 ) fossil_redirect_home();
  if( g.perm.Admin ){
    const char *zUuid = db_text("", "SELECT uuid FROM blob WHERE rid=%d", rid);
    if( db_exists("SELECT 1 FROM shun WHERE uuid=%Q", zUuid) ){
      style_submenu_element("Unshun","Unshun", "%s/shun?accept=%s&sub=1#accshun",
            g.zTop, zUuid);
    }else{
      style_submenu_element("Shun","Shun", "%s/shun?shun=%s#addshun",
            g.zTop, zUuid);
    }
  }
  if( descOnly || P("verbose")!=0 ) objdescFlags |= OBJDESC_DETAIL;
  style_header("%s", descOnly ? "Artifact Description" : "Artifact Content");
  zUuid = db_text("?", "SELECT uuid FROM blob WHERE rid=%d", rid);
  if( g.perm.Setup ){
    @ <h2>Artifact %s(zUuid) (%d(rid)):</h2>
  }else{
    @ <h2>Artifact %s(zUuid):</h2>
  }






  blob_zero(&downloadName);
  objType = object_description(rid, objdescFlags, &downloadName);
  style_submenu_element("Download", "Download",
          "%R/raw/%T?name=%s", blob_str(&downloadName), zUuid);
  if( db_exists("SELECT 1 FROM mlink WHERE fid=%d", rid) ){
    style_submenu_element("Check-ins Using", "Check-ins Using",
          "%R/timeline?n=200&uf=%s",zUuid);
  }
  asText = P("txt")!=0;






[new version, lines 1845-1899]

  if( rid==0 ){
    rid = name_to_rid_www("name");
  }

  login_check_credentials();
  if( !g.perm.Read ){ login_needed(g.anon.Read); return; }
  if( rid==0 ) fossil_redirect_home();
  if( descOnly || P("verbose")!=0 ) objdescFlags |= OBJDESC_DETAIL;
  blob_zero(&downloadName);
  objType = object_description(rid, objdescFlags, &downloadName);
  if( !descOnly && P("download")!=0 ){
    cgi_redirectf("%R/raw/%T?name=%s", blob_str(&downloadName),
          db_text("?", "SELECT uuid FROM blob WHERE rid=%d", rid));
    /*NOTREACHED*/
  }
  if( g.perm.Admin ){
    const char *zUuid = db_text("", "SELECT uuid FROM blob WHERE rid=%d", rid);
    if( db_exists("SELECT 1 FROM shun WHERE uuid=%Q", zUuid) ){
      style_submenu_element("Unshun","Unshun","%s/shun?accept=%s&sub=1#accshun",
            g.zTop, zUuid);
    }else{
      style_submenu_element("Shun","Shun", "%s/shun?shun=%s#addshun",
            g.zTop, zUuid);
    }
  }

  style_header("%s", descOnly ? "Artifact Description" : "Artifact Content");
  zUuid = db_text("?", "SELECT uuid FROM blob WHERE rid=%d", rid);
  if( g.perm.Setup ){
    @ <h2>Artifact %s(zUuid) (%d(rid)):</h2>
  }else{
    @ <h2>Artifact %s(zUuid):</h2>
  }
  if( g.perm.Admin ){
    Stmt q;
    db_prepare(&q,
      "SELECT coalesce(user.login,rcvfrom.uid),"
      "       datetime(rcvfrom.mtime), rcvfrom.ipaddr"
      "  FROM blob, rcvfrom LEFT JOIN user ON user.uid=rcvfrom.uid"
      " WHERE blob.rid=%d"

      "   AND rcvfrom.rcvid=blob.rcvid;", rid);
    while( db_step(&q)==SQLITE_ROW ){
      const char *zUser = db_column_text(&q,0);
      const char *zDate = db_column_text(&q,1);
      const char *zIp = db_column_text(&q,2);
      @ <p>Received on %s(zDate) from %h(zUser) at %h(zIp).</p>
    }
    db_finalize(&q);
  }
  style_submenu_element("Download", "Download",
          "%R/raw/%T?name=%s", blob_str(&downloadName), zUuid);
  if( db_exists("SELECT 1 FROM mlink WHERE fid=%d", rid) ){
    style_submenu_element("Check-ins Using", "Check-ins Using",
          "%R/timeline?n=200&uf=%s",zUuid);
  }
  asText = P("txt")!=0;

[old version, lines 1894-1925]

    content_get(rid, &content);
    if( renderAsWiki ){
      wiki_render_by_mimetype(&content, zMime);
    }else if( renderAsHtml ){
      @ <iframe src="%R/raw/%T(blob_str(&downloadName))?name=%s(zUuid)"
      @   width="100%%" frameborder="0" marginwidth="0" marginheight="0"
      @   sandbox="allow-same-origin"
      @   onload="this.height = this.contentDocument.documentElement.scrollHeight;">
      @ </iframe>
    }else{
      style_submenu_element("Hex","Hex", "%s/hexdump?name=%s", g.zTop, zUuid);
      blob_to_utf8_no_bom(&content, 0);
      zMime = mimetype_from_content(&content);
      @ <blockquote>
      if( zMime==0 ){
        const char *z;
        z = blob_str(&content);
        if( zLn ){
          output_text_with_line_numbers(z, zLn);
        }else{
          @ <pre>
          @ %h(z)
          @ </pre>
        }
      }else if( strncmp(zMime, "image/", 6)==0 ){

        @ <img src="%R/raw/%s(zUuid)?m=%s(zMime)" />
        style_submenu_element("Image", "Image",
                              "%R/raw/%s?m=%s", zUuid, zMime);
      }else{
        @ <i>(file is %d(blob_size(&content)) bytes of binary data)</i>
      }
      @ </blockquote>






[new version, lines 1933-1965]

    content_get(rid, &content);
    if( renderAsWiki ){
      wiki_render_by_mimetype(&content, zMime);
    }else if( renderAsHtml ){
      @ <iframe src="%R/raw/%T(blob_str(&downloadName))?name=%s(zUuid)"
      @   width="100%%" frameborder="0" marginwidth="0" marginheight="0"
      @   sandbox="allow-same-origin"
      @   onload="this.height=this.contentDocument.documentElement.scrollHeight;">
      @ </iframe>
    }else{
      style_submenu_element("Hex","Hex", "%s/hexdump?name=%s", g.zTop, zUuid);
      blob_to_utf8_no_bom(&content, 0);
      zMime = mimetype_from_content(&content);
      @ <blockquote>
      if( zMime==0 ){
        const char *z;
        z = blob_str(&content);
        if( zLn ){
          output_text_with_line_numbers(z, zLn);
        }else{
          @ <pre>
          @ %h(z)
          @ </pre>
        }
      }else if( strncmp(zMime, "image/", 6)==0 ){
        @ <i>(file is %d(blob_size(&content)) bytes of image data)</i><br>
        @ <img src="%R/raw/%s(zUuid)?m=%s(zMime)" />
        style_submenu_element("Image", "Image",
                              "%R/raw/%s?m=%s", zUuid, zMime);
      }else{
        @ <i>(file is %d(blob_size(&content)) bytes of binary data)</i>
      }
      @ </blockquote>

[old version, lines 2274-2287]

    }
    return 0;
  }
  while( fossil_isspace(zB[0]) ) zB++;
  while( fossil_isspace(zA[0]) ) zA++;
  return zA[0]==0 && zB[0]==0;
}

/*
** WEBPAGE: ci_edit
** URL:  /ci_edit?r=RID&c=NEWCOMMENT&u=NEWUSER
**
** Present a dialog for updating properties of a check-in.
**






[new version, lines 2314-2450]

    }
    return 0;
  }
  while( fossil_isspace(zB[0]) ) zB++;
  while( fossil_isspace(zA[0]) ) zA++;
  return zA[0]==0 && zB[0]==0;
}

/*
** The following methods operate on the newtags temporary table
** that is used to collect various changes to be added to a control
** artifact for a check-in edit.
*/
static void init_newtags(void){
  db_multi_exec("CREATE TEMP TABLE newtags(tag UNIQUE, prefix, value)");
}

static void change_special(
  const char *zName,    /* Name of the special tag */
  const char *zOp,      /* Operation prefix (e.g. +,-,*) */
  const char *zValue    /* Value of the tag */
){
  db_multi_exec("REPLACE INTO newtags VALUES(%Q,'%q',%Q)", zName, zOp, zValue);
}

static void change_sym_tag(const char *zTag, const char *zOp){
  db_multi_exec("REPLACE INTO newtags VALUES('sym-%q',%Q,NULL)", zTag, zOp);
}

static void cancel_special(const char *zTag){
  change_special(zTag,"-",0);
}

static void add_color(const char *zNewColor, int fPropagateColor){
  change_special("bgcolor",fPropagateColor ? "*" : "+", zNewColor);
}

static void cancel_color(void){
  change_special("bgcolor","-",0);
}

static void add_comment(const char *zNewComment){
  change_special("comment","+",zNewComment);
}

static void add_date(const char *zNewDate){
  change_special("date","+",zNewDate);
}

static void add_user(const char *zNewUser){
  change_special("user","+",zNewUser);
}

static void add_tag(const char *zNewTag){
  change_sym_tag(zNewTag,"+");
}

static void cancel_tag(int rid, const char *zCancelTag){
  if( db_exists("SELECT 1 FROM tagxref, tag"
                " WHERE tagxref.rid=%d AND tagtype>0"
                "   AND tagxref.tagid=tag.tagid AND tagname='sym-%q'",
                rid, zCancelTag)
  ) change_sym_tag(zCancelTag,"-");
}

static void hide_branch(void){
  change_special("hidden","*",0);
}

static void close_leaf(int rid){
  change_special("closed",is_a_leaf(rid)?"+":"*",0);
}

static void change_branch(int rid, const char *zNewBranch){
  db_multi_exec(
    "REPLACE INTO newtags "
    " SELECT tagname, '-', NULL FROM tagxref, tag"
    "  WHERE tagxref.rid=%d AND tagtype==2"
    "    AND tagname GLOB 'sym-*'"
    "    AND tag.tagid=tagxref.tagid",
    rid
  );
  change_special("branch","*",zNewBranch);
  change_sym_tag(zNewBranch,"*");
}

/*
** The apply_newtags method is called after all newtags have been added
** and the control artifact is completed and then written to the DB.
*/
static void apply_newtags(Blob *ctrl, int rid, const char *zUuid){
  Stmt q;
  int nChng = 0;

  db_prepare(&q, "SELECT tag, prefix, value FROM newtags"
                 " ORDER BY prefix || tag");
  while( db_step(&q)==SQLITE_ROW ){
    const char *zTag = db_column_text(&q, 0);
    const char *zPrefix = db_column_text(&q, 1);
    const char *zValue = db_column_text(&q, 2);
    nChng++;
    if( zValue ){
      blob_appendf(ctrl, "T %s%F %s %F\n", zPrefix, zTag, zUuid, zValue);
    }else{
      blob_appendf(ctrl, "T %s%F %s\n", zPrefix, zTag, zUuid);
    }
  }
  db_finalize(&q);
  if( nChng>0 ){
    int nrid;
    Blob cksum;
    blob_appendf(ctrl, "U %F\n", login_name());
    md5sum_blob(ctrl, &cksum);
    blob_appendf(ctrl, "Z %b\n", &cksum);
    db_begin_transaction();
    g.markPrivate = content_is_private(rid);
    nrid = content_put(ctrl);
    manifest_crosslink(nrid, ctrl, MC_PERMIT_HOOKS);
    assert( blob_is_reset(ctrl) );
    db_end_transaction(0);
  }
}

/*
** This method checks that the date can be parsed.
** Returns 1 if datetime() can validate, 0 otherwise.
*/
int is_datetime(const char* zDate){
  return db_int(0, "SELECT datetime(%Q) NOT NULL", zDate);
}

/*
** WEBPAGE: ci_edit
** URL:  /ci_edit?r=RID&c=NEWCOMMENT&u=NEWUSER
**
** Present a dialog for updating properties of a check-in.
**
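
Taken together, the helpers introduced above form a small pipeline: init_newtags() creates the temp table, the add_*/cancel_*/change_* calls queue individual tag edits, and apply_newtags() turns whatever was queued into T cards on a control artifact. A minimal sketch of that flow, mirroring how the refactored ci_edit handler and the new amend command drive it (the values are illustrative):

  /* Illustrative only: edit the comment, add a symbolic tag and close the
  ** leaf for check-in rid/zUuid, then write the control artifact. */
  Blob ctrl;
  blob_zero(&ctrl);
  blob_appendf(&ctrl, "D %s\n", date_in_standard_format("now"));
  init_newtags();                    /* CREATE TEMP TABLE newtags(...)     */
  add_comment("Corrected check-in comment");
  add_tag("needs-review");           /* queued as +sym-needs-review        */
  close_leaf(rid);                   /* +closed if rid is a leaf, else *closed */
  apply_newtags(&ctrl, rid, zUuid);  /* emits T/U/Z cards, stores artifact */
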

[old version, lines 2352-2462]

  zNewBrFlag = P("newbr") ? " checked" : "";
  zNewBranch = PDT("brname","");
  zCloseFlag = P("close") ? " checked" : "";
  zHideFlag = P("hide") ? " checked" : "";
  if( P("apply") ){
    Blob ctrl;
    char *zNow;
    int nChng = 0;

    login_verify_csrf_secret();
    blob_zero(&ctrl);
    zNow = date_in_standard_format(zChngTime ? zChngTime : "now");
    blob_appendf(&ctrl, "D %s\n", zNow);
    db_multi_exec("CREATE TEMP TABLE newtags(tag UNIQUE, prefix, value)");
    if( zNewColor[0]
     && (fPropagateColor!=fNewPropagateColor
             || fossil_strcmp(zColor,zNewColor)!=0)
    ){
      char *zPrefix = "+";
      if( fNewPropagateColor ){
        zPrefix = "*";
      }
      db_multi_exec("REPLACE INTO newtags VALUES('bgcolor',%Q,%Q)",
                    zPrefix, zNewColor);
    }
    if( zNewColor[0]==0 && zColor[0]!=0 ){
      db_multi_exec("REPLACE INTO newtags VALUES('bgcolor','-',NULL)");
    }
    if( comment_compare(zComment,zNewComment)==0 ){
      db_multi_exec("REPLACE INTO newtags VALUES('comment','+',%Q)",
                    zNewComment);
    }
    if( fossil_strcmp(zDate,zNewDate)!=0 ){
      db_multi_exec("REPLACE INTO newtags VALUES('date','+',%Q)",
                    zNewDate);
    }
    if( fossil_strcmp(zUser,zNewUser)!=0 ){
      db_multi_exec("REPLACE INTO newtags VALUES('user','+',%Q)", zNewUser);
    }
    db_prepare(&q,
       "SELECT tag.tagid, tagname FROM tagxref, tag"
       " WHERE tagxref.rid=%d AND tagtype>0 AND tagxref.tagid=tag.tagid",
       rid
    );
    while( db_step(&q)==SQLITE_ROW ){
      int tagid = db_column_int(&q, 0);
      const char *zTag = db_column_text(&q, 1);
      char zLabel[30];
      sqlite3_snprintf(sizeof(zLabel), zLabel, "c%d", tagid);
      if( P(zLabel) ){
        db_multi_exec("REPLACE INTO newtags VALUES(%Q,'-',NULL)", zTag);
      }
    }
    db_finalize(&q);
    if( zHideFlag[0] ){
      db_multi_exec("REPLACE INTO newtags VALUES('hidden','*',NULL)");
    }
    if( zCloseFlag[0] ){
      db_multi_exec("REPLACE INTO newtags VALUES('closed','%s',NULL)",
          is_a_leaf(rid)?"+":"*");
    }
    if( zNewTagFlag[0] && zNewTag[0] ){
      db_multi_exec("REPLACE INTO newtags VALUES('sym-%q','+',NULL)", zNewTag);
    }
    if( zNewBrFlag[0] && zNewBranch[0] ){
      db_multi_exec(
        "REPLACE INTO newtags "
        " SELECT tagname, '-', NULL FROM tagxref, tag"
        "  WHERE tagxref.rid=%d AND tagtype==2"
        "    AND tagname GLOB 'sym-*'"
        "    AND tag.tagid=tagxref.tagid",
        rid
      );
      db_multi_exec("REPLACE INTO newtags VALUES('branch','*',%Q)", zNewBranch);
      db_multi_exec("REPLACE INTO newtags VALUES('sym-%q','*',NULL)",
                    zNewBranch);
    }
    db_prepare(&q, "SELECT tag, prefix, value FROM newtags"
                   " ORDER BY prefix || tag");
    while( db_step(&q)==SQLITE_ROW ){
      const char *zTag = db_column_text(&q, 0);
      const char *zPrefix = db_column_text(&q, 1);
      const char *zValue = db_column_text(&q, 2);
      nChng++;
      if( zValue ){
        blob_appendf(&ctrl, "T %s%F %s %F\n", zPrefix, zTag, zUuid, zValue);
      }else{
        blob_appendf(&ctrl, "T %s%F %s\n", zPrefix, zTag, zUuid);
      }
    }
    db_finalize(&q);
    if( nChng>0 ){
      int nrid;
      Blob cksum;
      blob_appendf(&ctrl, "U %F\n", login_name());
      md5sum_blob(&ctrl, &cksum);
      blob_appendf(&ctrl, "Z %b\n", &cksum);
      db_begin_transaction();
      g.markPrivate = content_is_private(rid);
      nrid = content_put(&ctrl);
      manifest_crosslink(nrid, &ctrl, MC_PERMIT_HOOKS);
      assert( blob_is_reset(&ctrl) );
      db_end_transaction(0);
    }
    cgi_redirectf("ci?name=%s", zUuid);
  }
  blob_zero(&comment);
  blob_append(&comment, zNewComment, -1);
  zUuid[10] = 0;
  style_header("Edit Check-in [%s]", zUuid);
  /*






[new version, lines 2515-2560]

  zNewBrFlag = P("newbr") ? " checked" : "";
  zNewBranch = PDT("brname","");
  zCloseFlag = P("close") ? " checked" : "";
  zHideFlag = P("hide") ? " checked" : "";
  if( P("apply") ){
    Blob ctrl;
    char *zNow;


    login_verify_csrf_secret();
    blob_zero(&ctrl);
    zNow = date_in_standard_format(zChngTime ? zChngTime : "now");
    blob_appendf(&ctrl, "D %s\n", zNow);
    init_newtags();
    if( zNewColor[0]
     && (fPropagateColor!=fNewPropagateColor
             || fossil_strcmp(zColor,zNewColor)!=0)


    ) add_color(zNewColor,fNewPropagateColor);





    if( zNewColor[0]==0 && zColor[0]!=0 ) cancel_color();


    if( comment_compare(zComment,zNewComment)==0 ) add_comment(zNewComment);



    if( fossil_strcmp(zDate,zNewDate)!=0 ) add_date(zNewDate);



    if( fossil_strcmp(zUser,zNewUser)!=0 ) add_user(zNewUser);


    db_prepare(&q,
       "SELECT tag.tagid, tagname FROM tagxref, tag"
       " WHERE tagxref.rid=%d AND tagtype>0 AND tagxref.tagid=tag.tagid",
       rid
    );
    while( db_step(&q)==SQLITE_ROW ){
      int tagid = db_column_int(&q, 0);
      const char *zTag = db_column_text(&q, 1);
      char zLabel[30];
      sqlite3_snprintf(sizeof(zLabel), zLabel, "c%d", tagid);
      if( P(zLabel) ) cancel_special(zTag);

    }

    db_finalize(&q);
    if( zHideFlag[0] ) hide_branch();


    if( zCloseFlag[0] ) close_leaf(rid);



    if( zNewTagFlag[0] && zNewTag[0] ) add_tag(zNewTag);


    if( zNewBrFlag[0] && zNewBranch[0] ) change_branch(rid,zNewBranch);
    apply_newtags(&ctrl, rid, zUuid);
    cgi_redirectf("ci?name=%s", zUuid);
  }
  blob_zero(&comment);
  blob_append(&comment, zNewComment, -1);
  zUuid[10] = 0;
  style_header("Edit Check-in [%s]", zUuid);
  /*

[old version, lines 2650-2656]

  @ <input type="submit" name="apply" value="Apply Changes" />
  @ <input type="submit" name="cancel" value="Cancel" />
  @ </td></tr>
  @ </table>
  @ </div></form>
  style_footer();
}

[new version, lines 2748-2961]

  @ <input type="submit" name="apply" value="Apply Changes" />
  @ <input type="submit" name="cancel" value="Cancel" />
  @ </td></tr>
  @ </table>
  @ </div></form>
  style_footer();
}

/*
** Prepare an ammended commit comment.  Let the user modify it using the
** editor specified in the global_config table or either
** the VISUAL or EDITOR environment variable.
**
** Store the final commit comment in pComment.  pComment is assumed
** to be uninitialized - any prior content is overwritten.
**
** Use zInit to initialize the check-in comment so that the user does
** not have to retype.
*/
static void prepare_amend_comment(
  Blob *pComment,
  const char *zInit,
  const char *zUuid
){
  Blob prompt;
#if defined(_WIN32) || defined(__CYGWIN__)
  int bomSize;
  const unsigned char *bom = get_utf8_bom(&bomSize);
  blob_init(&prompt, (const char *) bom, bomSize);
  if( zInit && zInit[0]){
    blob_append(&prompt, zInit, -1);
  }
#else
  blob_init(&prompt, zInit, -1);
#endif
  blob_append(&prompt, "\n# Enter a new comment for check-in ", -1);
  if( zUuid && zUuid[0] ){
    blob_append(&prompt, zUuid, -1);
  }
  blob_append(&prompt, ".\n# Lines beginning with a # are ignored.\n", -1);
  prompt_for_user_comment(pComment, &prompt);
  blob_reset(&prompt);
}

#define AMEND_USAGE_STMT "UUID OPTION ?OPTION ...?"
/*
** COMMAND: amend
**
** Usage: %fossil amend UUID OPTION ?OPTION ...?
**
** Amend the tags on check-in UUID to change how it displays in the timeline.
**
** Options:
**
**    --author USER           Make USER the author for check-in
**    -m|--comment COMMENT    Make COMMENT the check-in comment
**    -M|--message-file FILE  Read the amended comment from FILE
**    -e|--edit-comment       Launch editor to revise comment
**    --date DATE             Make DATE the check-in time
**    --bgcolor COLOR         Apply COLOR to this check-in
**    --branchcolor COLOR     Apply and propagate COLOR to the branch
**    --tag TAG               Add new TAG to this check-in
**    --cancel TAG            Cancel TAG from this check-in
**    --branch NAME           Make this check-in the start of branch NAME
**    --hide                  Hide branch starting from this check-in
**    --close                 Mark this "leaf" as closed
*/
void ci_amend_cmd(void){
  int rid;
  const char *zComment;         /* Current comment on the check-in */
  const char *zNewComment;      /* Revised check-in comment */
  const char *zComFile;         /* Filename from which to read comment */
  const char *zUser;            /* Current user for the check-in */
  const char *zNewUser;         /* Revised user */
  const char *zDate;            /* Current date of the check-in */
  const char *zNewDate;         /* Revised check-in date */
  const char *zColor;
  const char *zNewColor;
  const char *zNewBrColor;
  const char *zNewBranch;
  const char **pzNewTags = 0;
  const char **pzCancelTags = 0;
  int fClose;                   /* True if leaf should be closed */
  int fHide;                    /* True if branch should be hidden */
  int fPropagateColor;          /* True if color propagates before amend */
  int fNewPropagateColor = 0;   /* True if color propagates after amend */
  int fHasHidden = 0;           /* True if hidden tag already set */
  int fHasClosed = 0;           /* True if closed tag already set */
  int fEditComment;             /* True if editor to be used for comment */
  const char *zChngTime;        /* The change time on the control artifact */
  const char *zUuid;
  Blob ctrl;
  Blob comment;
  char *zNow;
  int nTags, nCancels;
  int i;
  Stmt q;

  if( g.argc==3 ) usage(AMEND_USAGE_STMT);
  fEditComment = find_option("edit-comment","e",0)!=0;
  zNewComment = find_option("comment","m",1);
  zComFile = find_option("message-file","M",1);
  zNewBranch = find_option("branch",0,1);
  zNewColor = find_option("bgcolor",0,1);
  zNewBrColor = find_option("branchcolor",0,1);
  if( zNewBrColor ){
    zNewColor = zNewBrColor;
    fNewPropagateColor = 1;
  }
  zNewDate = find_option("date",0,1);
  zNewUser = find_option("author",0,1);
  pzNewTags = find_repeatable_option("tag",0,&nTags);
  pzCancelTags = find_repeatable_option("cancel",0,&nCancels);
  fClose = find_option("close",0,0)!=0;
  fHide = find_option("hide",0,0)!=0;
  zChngTime = find_option("chngtime",0,1);
  db_find_and_open_repository(0,0);
  user_select();
  verify_all_options();
  if( g.argc<3 || g.argc>=4 ) usage(AMEND_USAGE_STMT);
  rid = name_to_typed_rid(g.argv[2], "ci");
  if( rid==0 && !is_a_version(rid) ) fossil_fatal("no such check-in");
  zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid);
  if( zUuid==0 ) fossil_fatal("Unable to find UUID");
  zComment = db_text(0, "SELECT coalesce(ecomment,comment)"
                        "  FROM event WHERE objid=%d", rid);
  zUser = db_text(0, "SELECT coalesce(euser,user)"
                     "  FROM event WHERE objid=%d", rid);
  zDate = db_text(0, "SELECT datetime(mtime)"
                     "  FROM event WHERE objid=%d", rid);
  zColor = db_text("", "SELECT bgcolor"
                        "  FROM event WHERE objid=%d", rid);
  fPropagateColor = db_int(0, "SELECT tagtype FROM tagxref"
                              " WHERE rid=%d AND tagid=%d",
                              rid, TAG_BGCOLOR)==2;
  fNewPropagateColor = zNewColor && zNewColor[0]
                        ? fNewPropagateColor : fPropagateColor;
  db_prepare(&q,
     "SELECT tag.tagid FROM tagxref, tag"
     " WHERE tagxref.rid=%d AND tagtype>0 AND tagxref.tagid=tag.tagid",
     rid
  );
  while( db_step(&q)==SQLITE_ROW ){
    int tagid = db_column_int(&q, 0);

    if( tagid == TAG_CLOSED ){
      fHasClosed = 1;
    }else if( tagid==TAG_HIDDEN ){
      fHasHidden = 1;
    }else{
      continue;
    }
  }
  db_finalize(&q);
  blob_zero(&ctrl);
  zNow = date_in_standard_format(zChngTime && zChngTime[0] ? zChngTime : "now");
  blob_appendf(&ctrl, "D %s\n", zNow);
  init_newtags();
  if( zNewColor && zNewColor[0]
      && (fPropagateColor!=fNewPropagateColor
            || fossil_strcmp(zColor,zNewColor)!=0)
  ){
    add_color(
      mprintf("%s%s", (zNewColor[0]!='#' &&
        validate16(zNewColor,strlen(zNewColor)) &&
        (strlen(zNewColor)==6 || strlen(zNewColor)==3)) ? "#" : "",
        zNewColor
      ),
      fNewPropagateColor
    );
  }
  if( (zNewColor!=0 && zNewColor[0]==0) && (zColor && zColor[0] ) ){
    cancel_color();
  }
  if( fEditComment ){
    prepare_amend_comment(&comment, zComment, zUuid);
    zNewComment = blob_str(&comment);
  }else if( zComFile ){
    blob_zero(&comment);
    blob_read_from_file(&comment, zComFile);
    blob_to_utf8_no_bom(&comment, 1);
    zNewComment = blob_str(&comment);
  }
  if( zNewComment && zNewComment[0]
      && comment_compare(zComment,zNewComment)==0 ) add_comment(zNewComment);
  if( zNewDate && zNewDate[0] && fossil_strcmp(zDate,zNewDate)!=0 ){
    if( is_datetime(zNewDate) ){
      add_date(zNewDate);
    }else{
      fossil_fatal("Unsupported date format, use YYYY-MM-DD HH:MM:SS");
    }
  }
  if( zNewUser && zNewUser[0] && fossil_strcmp(zUser,zNewUser)!=0 ){
    add_user(zNewUser);
  }
  if( pzNewTags!=0 ){
    for(i=0; i<nTags; i++){
      if( pzNewTags[i] && pzNewTags[i][0] ) add_tag(pzNewTags[i]);
    }
    fossil_free((void *)pzNewTags);
  }
  if( pzCancelTags!=0 ){
    for(i=0; i<nCancels; i++){
      if( pzCancelTags[i] && pzCancelTags[i][0] )
        cancel_tag(rid,pzCancelTags[i]);
    }
    fossil_free((void *)pzCancelTags);
  }
  if( fHide && !fHasHidden ) hide_branch();
  if( fClose && !fHasClosed ) close_leaf(rid);
  if( zNewBranch && zNewBranch[0] ) change_branch(rid,zNewBranch);
  apply_newtags(&ctrl, rid, zUuid);
  show_common_info(rid, "uuid:", 1, 0);
}
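
For illustration, a hypothetical invocation of the new amend command following the usage string above (the UUID prefix, comment text, and tag name are made up):

  fossil amend 9f0a1b2c -m "Fix release notes typo" --tag approved --close
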
Changes to src/json.c.

[old version, lines 220-233]

** incorrectly removes it from the gc (which we never do). If this
** function fails, it is fatal to the app (as it indicates an
** allocation error (more likely than not) or a serious internal error
** such as numeric overflow).
*/
void json_gc_add( char const * key, cson_value * v ){
  int const rc = cson_array_append( g.json.gc.a, v );

  assert( NULL != g.json.gc.a );
  if( 0 != rc ){
    cson_value_free( v );
  }
  assert( (0==rc) && "Adding item to GC failed." );
  if(0!=rc){
    fprintf(stderr,"%s: FATAL: alloc error.\n", g.argv[0])






[new version, lines 220-234]

** incorrectly removes it from the gc (which we never do). If this
** function fails, it is fatal to the app (as it indicates an
** allocation error (more likely than not) or a serious internal error
** such as numeric overflow).
*/
void json_gc_add( char const * key, cson_value * v ){
  int const rc = cson_array_append( g.json.gc.a, v );

  assert( NULL != g.json.gc.a );
  if( 0 != rc ){
    cson_value_free( v );
  }
  assert( (0==rc) && "Adding item to GC failed." );
  if(0!=rc){
    fprintf(stderr,"%s: FATAL: alloc error.\n", g.argv[0])

[old version, lines 476-490]

/*
** The boolean equivalent of json_find_option_cstr().
** If the option is not found, dftl is returned.
*/
int json_find_option_bool(char const * zKey,
                          char const * zCLILong,
                          char const * zCLIShort,
                          char dflt ){
  int rc = -1;
  if(!g.isHTTP){
    if(NULL != find_option(zCLILong ? zCLILong : zKey,
                           zCLIShort, 0)){
      rc = 1;
    }
  }






[new version, lines 477-491]

/*
** The boolean equivalent of json_find_option_cstr().
** If the option is not found, dftl is returned.
*/
int json_find_option_bool(char const * zKey,
                          char const * zCLILong,
                          char const * zCLIShort,
                          int dflt ){
  int rc = -1;
  if(!g.isHTTP){
    if(NULL != find_option(zCLILong ? zCLILong : zKey,
                           zCLIShort, 0)){
      rc = 1;
    }
  }
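
The dflt parameter is now a plain int. A minimal sketch of a lookup, assuming a hypothetical "verbose" option that may arrive either in the JSON payload or as --verbose/-v on the CLI:

  /* Hypothetical option: defaults to 0 (false) when not supplied. */
  int fVerbose = json_find_option_bool("verbose", "verbose", "v", 0);
  if( fVerbose ){
    /* ...emit extra detail in the response... */
  }
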

[old version, lines 629-643]

** we will not be able to replace fossil's internal idea of the auth
** info in time (and future changes to that state may cause unexpected
** results).
**
** The result of this call are cached for future calls.
*/
cson_value * json_auth_token(){

  if( !g.json.authToken ){
    /* Try to get an authorization token from GET parameter, POSTed
       JSON, or fossil cookie (in that order). */
    g.json.authToken = json_getenv(FossilJsonKeys.authToken);
    if(g.json.authToken
       && cson_value_is_string(g.json.authToken)
       && !PD(login_cookie_name(),NULL)){
      /* tell fossil to use this login info.






[new version, lines 630-645]

** we will not be able to replace fossil's internal idea of the auth
** info in time (and future changes to that state may cause unexpected
** results).
**
** The result of this call are cached for future calls.
*/
cson_value * json_auth_token(){
    assert(g.json.gc.a && "json_main_bootstrap() was not called!");
    if( !g.json.authToken ){
    /* Try to get an authorization token from GET parameter, POSTed
       JSON, or fossil cookie (in that order). */
    g.json.authToken = json_getenv(FossilJsonKeys.authToken);
    if(g.json.authToken
       && cson_value_is_string(g.json.authToken)
       && !PD(login_cookie_name(),NULL)){
      /* tell fossil to use this login info.

[old version, lines 695-716]

*/
void json_main_bootstrap(){
  cson_value * v;
  assert( (NULL == g.json.gc.v) &&
          "json_main_bootstrap() was called twice!" );

  g.json.timerId = fossil_timer_start();
  
  /* g.json.gc is our "garbage collector" - where we put JSON values
     which need a long lifetime but don't have a logical parent to put
     them in.
  */
  v = cson_value_new_array();
  g.json.gc.v = v;

  g.json.gc.a = cson_value_get_array(v);

  cson_value_add_reference(v)
    /* Needed to allow us to include this value in other JSON
       containers without transferring ownership to those containers.
       All other persistent g.json.XXX.v values get appended to
       g.json.gc.a, and therefore already have a live reference
       for this purpose.
    */






[new version, lines 697-720]

*/
void json_main_bootstrap(){
  cson_value * v;
  assert( (NULL == g.json.gc.v) &&
          "json_main_bootstrap() was called twice!" );

  g.json.timerId = fossil_timer_start();

  /* g.json.gc is our "garbage collector" - where we put JSON values
     which need a long lifetime but don't have a logical parent to put
     them in.
  */
  v = cson_value_new_array();
  g.json.gc.v = v;
  assert(0 != g.json.gc.v);
  g.json.gc.a = cson_value_get_array(v);
  assert(0 != g.json.gc.a);
  cson_value_add_reference(v)
    /* Needed to allow us to include this value in other JSON
       containers without transferring ownership to those containers.
       All other persistent g.json.XXX.v values get appended to
       g.json.gc.a, and therefore already have a live reference
       for this purpose.
    */
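
Several hunks in this file add asserts that json_main_bootstrap() ran first. A sketch of the intended pattern, using only calls that appear elsewhere in this diff (the key and value are illustrative): bootstrap once, then park long-lived values in the gc array via json_gc_add() so they stay alive for the whole request.

  /* Sketch of the ownership pattern the new asserts enforce. */
  json_main_bootstrap();                    /* sets up g.json.gc          */
  cson_value * v = cson_value_new_array();  /* some long-lived JSON value */
  json_gc_add("$EXAMPLE", v);               /* gc now keeps v alive       */
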

[old version, lines 753-766]

** for consistency with how json_err() works.
*/
void json_warn( int code, char const * fmt, ... ){
  cson_object * obj = NULL;
  assert( (code>FSL_JSON_W_START)
          && (code<FSL_JSON_W_END)
          && "Invalid warning code.");

  if(!g.json.warnings){
    g.json.warnings = cson_new_array();
    assert((NULL != g.json.warnings) && "Alloc error.");
    json_gc_add("$WARNINGS",cson_array_value(g.json.warnings));
  }
  obj = cson_new_object();
  cson_array_append(g.json.warnings, cson_object_value(obj));






[new version, lines 757-771]

** for consistency with how json_err() works.
*/
void json_warn( int code, char const * fmt, ... ){
  cson_object * obj = NULL;
  assert( (code>FSL_JSON_W_START)
          && (code<FSL_JSON_W_END)
          && "Invalid warning code.");
  assert(g.json.gc.a && "json_main_bootstrap() was not called!");
  if(!g.json.warnings){
    g.json.warnings = cson_new_array();
    assert((NULL != g.json.warnings) && "Alloc error.");
    json_gc_add("$WARNINGS",cson_array_value(g.json.warnings));
  }
  obj = cson_new_object();
  cson_array_append(g.json.warnings, cson_object_value(obj));

[old version, lines 799-813]

** Achtung: leading and trailing whitespace of elements are elided.
**
** Achtung: empty elements will be skipped, meaning consecutive empty
** elements are collapsed.
*/
int json_string_split( char const * zStr,
                       char separator,
                       char doDeHttp,
                       cson_array * target ){
  char const * p = zStr /* current byte */;
  char const * head  /* current start-of-token */;
  unsigned int len = 0   /* current token's length */;
  int rc = 0   /* return code (number of added elements)*/;
  assert( zStr && target );
  while( fossil_isspace(*p) ){






[new version, lines 804-818]

** Achtung: leading and trailing whitespace of elements are elided.
**
** Achtung: empty elements will be skipped, meaning consecutive empty
** elements are collapsed.
*/
int json_string_split( char const * zStr,
                       char separator,
                       int doDeHttp,
                       cson_array * target ){
  char const * p = zStr /* current byte */;
  char const * head  /* current start-of-token */;
  unsigned int len = 0   /* current token's length */;
  int rc = 0   /* return code (number of added elements)*/;
  assert( zStr && target );
  while( fossil_isspace(*p) ){

[old version, lines 874-888]

** in any way or produced no tokens).
**
** The returned value is owned by the caller. If not NULL then it
** _will_ have a JSON type of Array.
*/
cson_value * json_string_split2( char const * zStr,
                                 char separator,
                                 char doDeHttp ){
  cson_array * a = cson_new_array();
  int rc = json_string_split( zStr, separator, doDeHttp, a );
  if( 0>=rc ){
    cson_free_array(a);
    a = NULL;
  }
  return a ? cson_array_value(a) : NULL;






[new version, lines 879-893]

** in any way or produced no tokens).
**
** The returned value is owned by the caller. If not NULL then it
** _will_ have a JSON type of Array.
*/
cson_value * json_string_split2( char const * zStr,
                                 char separator,
                                 int doDeHttp ){
  cson_array * a = cson_new_array();
  int rc = json_string_split( zStr, separator, doDeHttp, a );
  if( 0>=rc ){
    cson_free_array(a);
    a = NULL;
  }
  return a ? cson_array_value(a) : NULL;
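
A small illustration of the splitting rules spelled out in the comments above: whitespace around elements is trimmed and empty elements are dropped, so consecutive separators do not produce empty slots.

  /* "tag1, tag2,,  tag3" becomes the 3-element array ["tag1","tag2","tag3"];
  ** a NULL return would mean nothing was tokenized. */
  cson_value * list = json_string_split2("tag1, tag2,,  tag3", ',', 0);
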

[old version, lines 902-915]

** before they do any work.
**
** This must only be called once, or an assertion may be triggered.
*/
static void json_mode_bootstrap(){
  static char once = 0  /* guard against multiple runs */;
  char const * zPath = P("PATH_INFO");

  assert( (0==once) && "json_mode_bootstrap() called too many times!");
  if( once ){
    return;
  }else{
    once = 1;
  }
  g.json.isJsonMode = 1;






[new version, lines 907-921]

** before they do any work.
**
** This must only be called once, or an assertion may be triggered.
*/
static void json_mode_bootstrap(){
  static char once = 0  /* guard against multiple runs */;
  char const * zPath = P("PATH_INFO");
  assert(g.json.gc.a && "json_main_bootstrap() was not called!");
  assert( (0==once) && "json_mode_bootstrap() called too many times!");
  if( once ){
    return;
  }else{
    once = 1;
  }
  g.json.isJsonMode = 1;

[old version, lines 1080-1094]

** invalidated if that object is modified (depending on how it is
** modified).
**
** Note that CLI options are not included in the command path. Use
** find_option() to get those.
**
*/
char const * json_command_arg(unsigned char ndx){
  cson_array * ar = g.json.cmd.a;
  assert((NULL!=ar) && "Internal error. Was json_mode_bootstrap() called?");
  assert((g.argc>1) && "Internal error - we never should have gotten this far.");
  if( g.json.cmd.offset < 0 ){
    /* first-time setup. */
    short i = 0;
#define NEXT cson_string_cstr(          \






[new version, lines 1086-1100]

** invalidated if that object is modified (depending on how it is
** modified).
**
** Note that CLI options are not included in the command path. Use
** find_option() to get those.
**
*/
char const * json_command_arg(unsigned short ndx){
  cson_array * ar = g.json.cmd.a;
  assert((NULL!=ar) && "Internal error. Was json_mode_bootstrap() called?");
  assert((g.argc>1) && "Internal error - we never should have gotten this far.");
  if( g.json.cmd.offset < 0 ){
    /* first-time setup. */
    short i = 0;
#define NEXT cson_string_cstr(          \

[old version, lines 1495-1509]

** If !g.isHTTP then alsoOutput is ignored and all output is sent to
** stdout immediately.
**
** For generating the resultText property: if msg is not NULL then it
** is used as-is. If it is NULL then g.zErrMsg is checked, and if that
** is NULL then json_err_cstr(code) is used.
*/
void json_err( int code, char const * msg, char alsoOutput ){
  int rc = code ? code : (g.json.resultCode
                          ? g.json.resultCode
                          : FSL_JSON_E_UNKNOWN);
  cson_value * resp = NULL;
  rc = json_dumbdown_rc(rc);
  if( rc && !msg ){
    msg = g.zErrMsg;






[new version, lines 1501-1515]

** If !g.isHTTP then alsoOutput is ignored and all output is sent to
** stdout immediately.
**
** For generating the resultText property: if msg is not NULL then it
** is used as-is. If it is NULL then g.zErrMsg is checked, and if that
** is NULL then json_err_cstr(code) is used.
*/
void json_err( int code, char const * msg, int alsoOutput ){
  int rc = code ? code : (g.json.resultCode
                          ? g.json.resultCode
                          : FSL_JSON_E_UNKNOWN);
  cson_value * resp = NULL;
  rc = json_dumbdown_rc(rc);
  if( rc && !msg ){
    msg = g.zErrMsg;

[old version, lines 1660-1674]

** pTgt has the same semantics as described for
** json_stmt_to_array_of_obj().
**
** FIXME: change this to take a (char const *) instead of a blob,
** to simplify the trivial use-cases (which don't need a Blob).
*/
cson_value * json_sql_to_array_of_obj(Blob * pSql, cson_array * pTgt,
                                      char resetBlob){
  Stmt q = empty_Stmt;
  cson_value * pay = NULL;
  assert( blob_size(pSql) > 0 );
  db_prepare(&q, "%s", blob_str(pSql) /*safe-for-%s*/);
  if(resetBlob){
    blob_reset(pSql);
  }






[new version, lines 1666-1680]

** pTgt has the same semantics as described for
** json_stmt_to_array_of_obj().
**
** FIXME: change this to take a (char const *) instead of a blob,
** to simplify the trivial use-cases (which don't need a Blob).
*/
cson_value * json_sql_to_array_of_obj(Blob * pSql, cson_array * pTgt,
                                      int resetBlob){
  Stmt q = empty_Stmt;
  cson_value * pay = NULL;
  assert( blob_size(pSql) > 0 );
  db_prepare(&q, "%s", blob_str(pSql) /*safe-for-%s*/);
  if(resetBlob){
    blob_reset(pSql);
  }
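
A hedged sketch of the calling pattern the FIXME above refers to: even a one-off query has to be wrapped in a Blob first. Passing a NULL target array is assumed to ask the function to allocate the result itself, per the json_stmt_to_array_of_obj() semantics it references (not shown in this diff); resetBlob releases the SQL text afterwards.

  /* Illustrative query only. */
  Blob sql;
  blob_zero(&sql);
  blob_appendf(&sql, "SELECT rid, uuid FROM blob WHERE rid=%d", rid);
  cson_value * rows = json_sql_to_array_of_obj(&sql, NULL, 1 /* resetBlob */);
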

[old version, lines 1685-1699]

**
** See info_tags_of_checkin() for more details (this is simply a JSON
** wrapper for that function).
**
** If there are no tags then this function returns NULL, not an empty
** Array.
*/
cson_value * json_tags_for_checkin_rid(int rid, char propagatingOnly){
  cson_value * v = NULL;
  char * tags = info_tags_of_checkin(rid, propagatingOnly);
  if(tags){
    if(*tags){
      v = json_string_split2(tags,',',0);
    }
    free(tags);






**
** See info_tags_of_checkin() for more details (this is simply a JSON
** wrapper for that function).
**
** If there are no tags then this function returns NULL, not an empty
** Array.
*/
cson_value * json_tags_for_checkin_rid(int rid, int propagatingOnly){
  cson_value * v = NULL;
  char * tags = info_tags_of_checkin(rid, propagatingOnly);
  if(tags){
    if(*tags){
      v = json_string_split2(tags,',',0);
    }
    free(tags);
**
** Pages under /json/... must be entered into JsonPageDefs.
** This function dispatches them, and is the HTTP equivalent of
** json_cmd_top().
*/
void json_page_top(void){
  char const * zCommand;

  json_mode_bootstrap();
  zCommand = json_command_arg(1);
  if(!zCommand || !*zCommand){
    json_dispatch_missing_args_err( JsonPageDefs,
                                    "No command (sub-path) specified."
                                    " Try one of: ");
    return;






**
** Pages under /json/... must be entered into JsonPageDefs.
** This function dispatches them, and is the HTTP equivalent of
** json_cmd_top().
*/
void json_page_top(void){
  char const * zCommand;
  assert(g.json.gc.a && "json_main_bootstrap() was not called!");
  json_mode_bootstrap();
  zCommand = json_command_arg(1);
  if(!zCommand || !*zCommand){
    json_dispatch_missing_args_err( JsonPageDefs,
                                    "No command (sub-path) specified."
                                    " Try one of: ");
    return;
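
The assert added above encodes an ordering requirement: json_main_bootstrap() must have set up g.json.gc.a before json_page_top() runs. A minimal sketch of that call order follows (the wrapper name is hypothetical and this is not code from the check-in; it only assumes the two functions named in the hunk):

  /* Sketch only: the order the new assert in json_page_top() expects. */
  static void json_http_entry_sketch(void){
    json_main_bootstrap();  /* allocates g.json.gc.a and related JSON state */
    json_page_top();        /* safe now: the assert on g.json.gc.a holds */
  }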
Changes to src/json_detail.h.
  **
  ** <0 = CLI only, >0 = HTTP only, 0==both
  **
  ** Now that we can simulate POST in CLI mode, the distinction
  ** between them has disappeared in most (or all) cases, so 0 is
  ** the standard value.
  */
  char runMode;
} JsonPageDef;

/*
** Holds common keys used for various JSON API properties.
*/
typedef struct FossilJsonKeys_{
  /** maintainers: please keep alpha sorted (case-insensitive) */






  **
  ** <0 = CLI only, >0 = HTTP only, 0==both
  **
  ** Now that we can simulate POST in CLI mode, the distinction
  ** between them has disappeared in most (or all) cases, so 0 is
  ** the standard value.
  */
  int runMode;
} JsonPageDef;

/*
** Holds common keys used for various JSON API properties.
*/
typedef struct FossilJsonKeys_{
  /** maintainers: please keep alpha sorted (case-insensitive) */
Changes to src/json_timeline.c.
  int rc = 0;
  zAfter = json_find_option_cstr("after",NULL,"a");
  zBefore = zAfter ? NULL : json_find_option_cstr("before",NULL,"b");

  if(zAfter&&*zAfter){
    while( fossil_isspace(*zAfter) ) ++zAfter;
    blob_appendf(pSql,
                 " AND event.mtime>=(SELECT julianday(%Q,'utc')) "
                 " ORDER BY event.mtime ASC ",
                 zAfter);
    rc = 1;
  }else if(zBefore && *zBefore){
    while( fossil_isspace(*zBefore) ) ++zBefore;
    blob_appendf(pSql,
                 " AND event.mtime<=(SELECT julianday(%Q,'utc')) "
                 " ORDER BY event.mtime DESC ",
                 zBefore);
    rc = -1;
  }else{
    blob_append(pSql, " ORDER BY event.mtime DESC ", -1);
    rc = 0;
  }






  int rc = 0;
  zAfter = json_find_option_cstr("after",NULL,"a");
  zBefore = zAfter ? NULL : json_find_option_cstr("before",NULL,"b");

  if(zAfter&&*zAfter){
    while( fossil_isspace(*zAfter) ) ++zAfter;
    blob_appendf(pSql,
                 " AND event.mtime>=(SELECT julianday(%Q,fromLocal())) "
                 " ORDER BY event.mtime ASC ",
                 zAfter);
    rc = 1;
  }else if(zBefore && *zBefore){
    while( fossil_isspace(*zBefore) ) ++zBefore;
    blob_appendf(pSql,
                 " AND event.mtime<=(SELECT julianday(%Q,fromLocal())) "
                 " ORDER BY event.mtime DESC ",
                 zBefore);
    rc = -1;
  }else{
    blob_append(pSql, " ORDER BY event.mtime DESC ", -1);
    rc = 0;
  }
           "       (fid==0) AS isdel,"
           "       (SELECT name FROM filename WHERE fnid=mlink.fnid) AS name,"
           "       blob.uuid as uuid,"
           "       (SELECT uuid FROM blob WHERE rid=pid) as parent,"
           "       blob.size as size"
           "  FROM mlink, blob"
           " WHERE mid=%d AND pid!=fid"
           " AND blob.rid=fid "
           " ORDER BY name /*sort*/",
             rid
             );
  while( (SQLITE_ROW == db_step(&q)) ){
    cson_value * rowV = cson_value_new_object();
    cson_object * row = cson_value_get_object(rowV);
    int const isNew = db_column_int(&q,0);






           "       (fid==0) AS isdel,"
           "       (SELECT name FROM filename WHERE fnid=mlink.fnid) AS name,"
           "       blob.uuid as uuid,"
           "       (SELECT uuid FROM blob WHERE rid=pid) as parent,"
           "       blob.size as size"
           "  FROM mlink, blob"
           " WHERE mid=%d AND pid!=fid"
           " AND blob.rid=fid AND NOT mlink.isaux"
           " ORDER BY name /*sort*/",
             rid
             );
  while( (SQLITE_ROW == db_step(&q)) ){
    cson_value * rowV = cson_value_new_object();
    cson_object * row = cson_value_get_object(rowV);
    int const isNew = db_column_int(&q,0);
Changes to src/json_wiki.c.
  }

  blob_init(&w1, pW1->zWiki, -1);
  blob_zero(&w2);
  blob_init(&w2, pW2->zWiki, -1);
  blob_zero(&d);
  diffFlags = DIFF_IGNORE_EOLWS | DIFF_STRIP_EOLCR;
  text_diff(&w2, &w1, &d, 0, diffFlags);
  blob_reset(&w1);
  blob_reset(&w2);

  pay = cson_new_object();
  
  zUuid = json_wiki_get_uuid_for_rid( pW1->rid );
  cson_object_set(pay, "v1", json_new_string(zUuid) );






  }

  blob_init(&w1, pW1->zWiki, -1);
  blob_zero(&w2);
  blob_init(&w2, pW2->zWiki, -1);
  blob_zero(&d);
  diffFlags = DIFF_IGNORE_EOLWS | DIFF_STRIP_EOLCR;
  text_diff(&w1, &w2, &d, 0, diffFlags);
  blob_reset(&w1);
  blob_reset(&w2);

  pay = cson_new_object();
  
  zUuid = json_wiki_get_uuid_for_rid( pW1->rid );
  cson_object_set(pay, "v1", json_new_string(zUuid) );
Changes to src/leaf.c.
                TAG_BRANCH, rid);
  if( zBr==0 ) zBr = fossil_strdup("trunk");
  blob_init(&msg, 0, 0);
  blob_appendf(&msg, "WARNING: multiple open leaf check-ins on %s:", zBr);
  db_prepare(&q,
    "SELECT"
    "  (SELECT uuid FROM blob WHERE rid=leaf.rid),"
    "  (SELECT datetime(mtime%s) FROM event WHERE objid=leaf.rid),"
    "  leaf.rid"
    "  FROM leaf"
    " WHERE (SELECT value FROM tagxref WHERE tagid=%d AND rid=leaf.rid)=%Q"
    "   AND NOT %z"
    " ORDER BY 2 DESC",
    timeline_utc(), TAG_BRANCH, zBr, leaf_is_closed_sql("leaf.rid")
  );
  while( db_step(&q)==SQLITE_ROW ){
    blob_appendf(&msg, "\n  (%d) %s [%S]%s",
          ++n, db_column_text(&q,1), db_column_text(&q,0),
          db_column_int(&q,2)==currentCkout ? " (current)" : "");
  }
  db_finalize(&q);






                TAG_BRANCH, rid);
  if( zBr==0 ) zBr = fossil_strdup("trunk");
  blob_init(&msg, 0, 0);
  blob_appendf(&msg, "WARNING: multiple open leaf check-ins on %s:", zBr);
  db_prepare(&q,
    "SELECT"
    "  (SELECT uuid FROM blob WHERE rid=leaf.rid),"
    "  (SELECT datetime(mtime,toLocal()) FROM event WHERE objid=leaf.rid),"
    "  leaf.rid"
    "  FROM leaf"
    " WHERE (SELECT value FROM tagxref WHERE tagid=%d AND rid=leaf.rid)=%Q"
    "   AND NOT %z"
    " ORDER BY 2 DESC",
    TAG_BRANCH, zBr, leaf_is_closed_sql("leaf.rid")
  );
  while( db_step(&q)==SQLITE_ROW ){
    blob_appendf(&msg, "\n  (%d) %s [%S]%s",
          ++n, db_column_text(&q,1), db_column_text(&q,0),
          db_column_int(&q,2)==currentCkout ? " (current)" : "");
  }
  db_finalize(&q);
Changes to src/linenoise.c.
#include <sys/types.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "linenoise.h"

#define LINENOISE_DEFAULT_HISTORY_MAX_LEN 100
#define LINENOISE_MAX_LINE 4096
static char *unsupported_term[] = {"dumb","cons25","emacs",NULL};
static linenoiseCompletionCallback *completionCallback = NULL;

static struct termios orig_termios; /* In order to restore at exit.*/
static int rawmode = 0; /* For atexit() function to check if restore is needed*/
static int mlmode = 0;  /* Multi line mode. Default is single line. */
static int atexit_registered = 0; /* Register atexit just 1 time. */
static int history_max_len = LINENOISE_DEFAULT_HISTORY_MAX_LEN;






#include <sys/types.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "linenoise.h"

#define LINENOISE_DEFAULT_HISTORY_MAX_LEN 100
#define LINENOISE_MAX_LINE 4096
static const char *unsupported_term[] = {"dumb","cons25","emacs",NULL};
static linenoiseCompletionCallback *completionCallback = NULL;

static struct termios orig_termios; /* In order to restore at exit.*/
static int rawmode = 0; /* For atexit() function to check if restore is needed*/
static int mlmode = 0;  /* Multi line mode. Default is single line. */
static int atexit_registered = 0; /* Register atexit just 1 time. */
static int history_max_len = LINENOISE_DEFAULT_HISTORY_MAX_LEN;
        nread = read(STDIN_FILENO,&c,1);
        if (nread <= 0) continue;
        memmove(quit,quit+1,sizeof(quit)-1); /* shift string to left. */
        quit[sizeof(quit)-1] = c; /* Insert current char on the right. */
        if (memcmp(quit,"quit",sizeof(quit)) == 0) break;

        printf("'%c' %02x (%d) (type quit to exit)\n",
            isprint(c) ? c : '?', (int)c, (int)c);
        printf("\r"); /* Go left edge manually, we are in raw mode. */
        fflush(stdout);
    }
    disableRawMode(STDIN_FILENO);
}

/* This function calls the line editing function linenoiseEdit() using






        nread = read(STDIN_FILENO,&c,1);
        if (nread <= 0) continue;
        memmove(quit,quit+1,sizeof(quit)-1); /* shift string to left. */
        quit[sizeof(quit)-1] = c; /* Insert current char on the right. */
        if (memcmp(quit,"quit",sizeof(quit)) == 0) break;

        printf("'%c' %02x (%d) (type quit to exit)\n",
            isprint((int)c) ? c : '?', (int)c, (int)c);
        printf("\r"); /* Go left edge manually, we are in raw mode. */
        fflush(stdout);
    }
    disableRawMode(STDIN_FILENO);
}

/* This function calls the line editing function linenoiseEdit() using
Changes to src/login.c.
  char *zSha1Pw = sha1_shared_secret(zPasswd, zUsername, 0);
  int const uid =
      db_int(0,
             "SELECT uid FROM user"
             " WHERE login=%Q"
             "   AND length(cap)>0 AND length(pw)>0"
             "   AND login NOT IN ('anonymous','nobody','developer','reader')"
             "   AND (pw=%Q OR (length(pw)<>40 AND pw=%Q))",


             zUsername, zSha1Pw, zPasswd
             );
  free(zSha1Pw);
  return uid;
}

/*






  char *zSha1Pw = sha1_shared_secret(zPasswd, zUsername, 0);
  int const uid =
      db_int(0,
             "SELECT uid FROM user"
             " WHERE login=%Q"
             "   AND length(cap)>0 AND length(pw)>0"
             "   AND login NOT IN ('anonymous','nobody','developer','reader')"
             "   AND (pw=%Q OR (length(pw)<>40 AND pw=%Q))"
             "   AND (info NOT LIKE '%%expires 20%%'"
             "      OR substr(info,instr(lower(info),'expires')+8,10)>datetime('now'))",
             zUsername, zSha1Pw, zPasswd
             );
  free(zSha1Pw);
  return uid;
}
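
The extra predicate above makes a login fail once the user's info field carries a past "expires YYYY-MM-DD" note, while leaving accounts without such a note untouched. A hedged sketch of the same date extraction, reusing the db_int() helper already used by this query (the helper name and sample text are illustrative only, not part of the check-in):

  /* Sketch only: given info = 'contractor, expires 2016-01-31', lower(info)
  ** locates 'expires', substr(...+8,10) pulls out '2016-01-31', and comparing
  ** that text against datetime('now') tells whether the account has lapsed. */
  static int login_is_expired_sketch(const char *zInfo){
    return db_int(0,
       "SELECT %Q LIKE '%%expires 20%%'"
       "   AND substr(%Q, instr(lower(%Q),'expires')+8, 10) <= datetime('now')",
       zInfo, zInfo, zInfo);
  }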

/*
  int i;
  FossilUserPerms *p = (flags & LOGIN_ANON) ? &g.anon : &g.perm;
  if(NULL==zCap){
    return;
  }
  for(i=0; zCap[i]; i++){
    switch( zCap[i] ){
      case 's':   p->Setup = 1;  /* Fall thru into Admin */
      case 'a':   p->Admin = p->RdTkt = p->WrTkt = p->Zip =
                           p->RdWiki = p->WrWiki = p->NewWiki =
                           p->ApndWiki = p->Hyperlink = p->Clone =
                           p->NewTkt = p->Password = p->RdAddr =
                           p->TktFmt = p->Attach = p->ApndTkt =
                           p->ModWiki = p->ModTkt = 1;

                           /* Fall thru into Read/Write */
      case 'i':   p->Read = p->Write = 1;                     break;
      case 'o':   p->Read = 1;                                 break;
      case 'z':   p->Zip = 1;                                  break;

      case 'd':   p->Delete = 1;                               break;
      case 'h':   p->Hyperlink = 1;                            break;
      case 'g':   p->Clone = 1;                                break;
      case 'p':   p->Password = 1;                             break;

      case 'j':   p->RdWiki = 1;                               break;
      case 'k':   p->WrWiki = p->RdWiki = p->ApndWiki =1;    break;
      case 'm':   p->ApndWiki = 1;                             break;
      case 'f':   p->NewWiki = 1;                              break;
      case 'l':   p->ModWiki = 1;                              break;

      case 'e':   p->RdAddr = 1;                               break;
      case 'r':   p->RdTkt = 1;                                break;
      case 'n':   p->NewTkt = 1;                               break;






  int i;
  FossilUserPerms *p = (flags & LOGIN_ANON) ? &g.anon : &g.perm;
  if(NULL==zCap){
    return;
  }
  for(i=0; zCap[i]; i++){
    switch( zCap[i] ){
      case 's':   p->Setup = 1; /* Fall thru into Admin */
      case 'a':   p->Admin = p->RdTkt = p->WrTkt = p->Zip =
                             p->RdWiki = p->WrWiki = p->NewWiki =
                             p->ApndWiki = p->Hyperlink = p->Clone =
                             p->NewTkt = p->Password = p->RdAddr =
                             p->TktFmt = p->Attach = p->ApndTkt =
                             p->ModWiki = p->ModTkt = p->Delete =
                             p->Private = 1;
                             /* Fall thru into Read/Write */
      case 'i':   p->Read = p->Write = 1;                      break;
      case 'o':   p->Read = 1;                                 break;
      case 'z':   p->Zip = 1;                                  break;

      case 'd':   p->Delete = 1;                               break;
      case 'h':   p->Hyperlink = 1;                            break;
      case 'g':   p->Clone = 1;                                break;
      case 'p':   p->Password = 1;                             break;

      case 'j':   p->RdWiki = 1;                               break;
      case 'k':   p->WrWiki = p->RdWiki = p->ApndWiki =1;      break;
      case 'm':   p->ApndWiki = 1;                             break;
      case 'f':   p->NewWiki = 1;                              break;
      case 'l':   p->ModWiki = 1;                              break;

      case 'e':   p->RdAddr = 1;                               break;
      case 'r':   p->RdTkt = 1;                                break;
      case 'n':   p->NewTkt = 1;                               break;
      case 't':  rc = p->TktFmt;    break;
      /* case 'u': READER    */
      /* case 'v': DEVELOPER */
      case 'w':  rc = p->WrTkt;     break;
      case 'x':  rc = p->Private;   break;
      /* case 'y': */
      case 'z':  rc = p->Zip;       break;
      default:   rc = 0;             break;
    }
  }
  return rc;
}

/*
** Change the login to zUser.






      case 't':  rc = p->TktFmt;    break;
      /* case 'u': READER    */
      /* case 'v': DEVELOPER */
      case 'w':  rc = p->WrTkt;     break;
      case 'x':  rc = p->Private;   break;
      /* case 'y': */
      case 'z':  rc = p->Zip;       break;
      default:   rc = 0;            break;
    }
  }
  return rc;
}

/*
** Change the login to zUser.
void login_insert_csrf_secret(void){
  @ <input type="hidden" name="csrf" value="%s(g.zCsrfToken)" />
}

/*
** Before using the results of a form, first call this routine to verify
** that this Anti-CSRF token is present and is valid.  If the Anti-CSRF token
** is missing or is incorrect, that indicates a cross-site scripting attach
** so emits an error message and abort.

*/
void login_verify_csrf_secret(void){
  if( g.okCsrf ) return;
  if( fossil_strcmp(P("csrf"), g.zCsrfToken)==0 ){
    g.okCsrf = 1;
    return;
  }






void login_insert_csrf_secret(void){
  @ <input type="hidden" name="csrf" value="%s(g.zCsrfToken)" />
}

/*
** Before using the results of a form, first call this routine to verify
** that this Anti-CSRF token is present and is valid.  If the Anti-CSRF token
** is missing or is incorrect, that indicates a cross-site scripting attack.
** If the event of an attack is detected, an error message is generated and
** all further processing is aborted.
*/
void login_verify_csrf_secret(void){
  if( g.okCsrf ) return;
  if( fossil_strcmp(P("csrf"), g.zCsrfToken)==0 ){
    g.okCsrf = 1;
    return;
  }
Changes to src/lookslike.c.
  if( n==0 ) return flags;  /* Empty file -> text */
  c = *z;
  if( c==0 ){
    flags |= LOOK_NUL;  /* NUL character in a file -> binary */
  }else if( c=='\r' ){
    flags |= LOOK_CR;
    if( n<=1 || z[1]!='\n' ){
      flags |= LOOK_LONE_CR;  /* More chars, next char is not LF */
    }
  }
  j = (c!='\n');
  if( !j ) flags |= (LOOK_LF | LOOK_LONE_LF);  /* Found LF as first char */
  while( !(flags&stopFlags) && --n>0 ){
    int c2 = c;
    c = *++z; ++j;






  if( n==0 ) return flags;  /* Empty file -> text */
  c = *z;
  if( c==0 ){
    flags |= LOOK_NUL;  /* NUL character in a file -> binary */
  }else if( c=='\r' ){
    flags |= LOOK_CR;
    if( n<=1 || z[1]!='\n' ){
      flags |= LOOK_LONE_CR;  /* Not enough chars or next char not LF */
    }
  }
  j = (c!='\n');
  if( !j ) flags |= (LOOK_LF | LOOK_LONE_LF);  /* Found LF as first char */
  while( !(flags&stopFlags) && --n>0 ){
    int c2 = c;
    c = *++z; ++j;
      if( j>LENGTH_MASK ){
        flags |= LOOK_LONG;  /* Very long line -> binary */
      }
      j = 0;
    }else if( c=='\r' ){
      flags |= LOOK_CR;
      if( n<=1 || z[1]!='\n' ){
        flags |= LOOK_LONE_CR;  /* More chars, next char is not LF */
      }
    }
  }
  if( n ){
    flags |= LOOK_SHORT;  /* The whole blob was not examined */
  }
  if( j>LENGTH_MASK ){






      if( j>LENGTH_MASK ){
        flags |= LOOK_LONG;  /* Very long line -> binary */
      }
      j = 0;
    }else if( c=='\r' ){
      flags |= LOOK_CR;
      if( n<=1 || z[1]!='\n' ){
        flags |= LOOK_LONE_CR;  /* Not enough chars or next char not LF */
      }
    }
  }
  if( n ){
    flags |= LOOK_SHORT;  /* The whole blob was not examined */
  }
  if( j>LENGTH_MASK ){
    c = UTF16_SWAP(c);
  }
  if( c==0 ){
    flags |= LOOK_NUL;  /* NUL character in a file -> binary */
  }else if( c=='\r' ){
    flags |= LOOK_CR;
    if( n<(2*sizeof(WCHAR_T)) || UTF16_SWAP_IF(bReverse, z[1])!='\n' ){
      flags |= LOOK_LONE_CR;  /* More chars, next char is not LF */
    }
  }
  j = (c!='\n');
  if( !j ) flags |= (LOOK_LF | LOOK_LONE_LF);  /* Found LF as first char */
  while( !(flags&stopFlags) && ((n-=sizeof(WCHAR_T))>=sizeof(WCHAR_T)) ){
    int c2 = c;
    c = *++z;






    c = UTF16_SWAP(c);
  }
  if( c==0 ){
    flags |= LOOK_NUL;  /* NUL character in a file -> binary */
  }else if( c=='\r' ){
    flags |= LOOK_CR;
    if( n<(2*sizeof(WCHAR_T)) || UTF16_SWAP_IF(bReverse, z[1])!='\n' ){
      flags |= LOOK_LONE_CR;  /* Not enough chars or next char not LF */
    }
  }
  j = (c!='\n');
  if( !j ) flags |= (LOOK_LF | LOOK_LONE_LF);  /* Found LF as first char */
  while( !(flags&stopFlags) && ((n-=sizeof(WCHAR_T))>=sizeof(WCHAR_T)) ){
    int c2 = c;
    c = *++z;
      if( j>UTF16_LENGTH_MASK ){
        flags |= LOOK_LONG;  /* Very long line -> binary */
      }
      j = 0;
    }else if( c=='\r' ){
      flags |= LOOK_CR;
      if( n<(2*sizeof(WCHAR_T)) || UTF16_SWAP_IF(bReverse, z[1])!='\n' ){
        flags |= LOOK_LONE_CR;  /* More chars, next char is not LF */
      }
    }
  }
  if( n ){
    flags |= LOOK_SHORT;  /* The whole blob was not examined */
  }
  if( j>UTF16_LENGTH_MASK ){






      if( j>UTF16_LENGTH_MASK ){
        flags |= LOOK_LONG;  /* Very long line -> binary */
      }
      j = 0;
    }else if( c=='\r' ){
      flags |= LOOK_CR;
      if( n<(2*sizeof(WCHAR_T)) || UTF16_SWAP_IF(bReverse, z[1])!='\n' ){
        flags |= LOOK_LONE_CR;  /* Not enough chars or next char not LF */
      }
    }
  }
  if( n ){
    flags |= LOOK_SHORT;  /* The whole blob was not examined */
  }
  if( j>UTF16_LENGTH_MASK ){
Changes to src/main.c.
  g.argc = argc;
  g.argv = argv;
  sqlite3_initialize();
#if defined(_WIN32) && defined(BROKEN_MINGW_CMDLINE)
  for(i=0; i<g.argc; i++) g.argv[i] = fossil_mbcs_to_utf8(g.argv[i]);
#else
  for(i=0; i<g.argc; i++) g.argv[i] = fossil_filename_to_utf8(g.argv[i]);
#endif
#if defined(_WIN32)
  GetModuleFileNameW(NULL, buf, MAX_PATH);
  g.nameOfExe = fossil_filename_to_utf8(buf);
#else
  g.nameOfExe = g.argv[0];
#endif
  for(i=1; i<g.argc-1; i++){
    z = g.argv[i];
    if( z[0]!='-' ) continue;
    z++;






  g.argc = argc;
  g.argv = argv;
  sqlite3_initialize();
#if defined(_WIN32) && defined(BROKEN_MINGW_CMDLINE)
  for(i=0; i<g.argc; i++) g.argv[i] = fossil_mbcs_to_utf8(g.argv[i]);
#else
  for(i=0; i<g.argc; i++) g.argv[i] = fossil_path_to_utf8(g.argv[i]);
#endif
#if defined(_WIN32)
  GetModuleFileNameW(NULL, buf, MAX_PATH);
  g.nameOfExe = fossil_path_to_utf8(buf);
#else
  g.nameOfExe = g.argv[0];
#endif
  for(i=1; i<g.argc-1; i++){
    z = g.argv[i];
    if( z[0]!='-' ) continue;
    z++;
#endif
int main(int argc, char **argv)
#endif
{
  const char *zCmdName = "unknown";
  int idx;
  int rc;
  if( sqlite3_libversion_number()<3008007 ){
    fossil_fatal("Unsuitable SQLite version %s, must be at least 3.8.7",
                 sqlite3_libversion());
  }
  sqlite3_config(SQLITE_CONFIG_MULTITHREAD);
  sqlite3_config(SQLITE_CONFIG_LOG, fossil_sqlite_log, 0);
  memset(&g, 0, sizeof(g));
  g.now = time(0);
  g.httpHeader = empty_blob;






#endif
int main(int argc, char **argv)
#endif
{
  const char *zCmdName = "unknown";
  int idx;
  int rc;
  if( sqlite3_libversion_number()<3010000 ){
    fossil_fatal("Unsuitable SQLite version %s, must be at least 3.10.0",
                 sqlite3_libversion());
  }
  sqlite3_config(SQLITE_CONFIG_MULTITHREAD);
  sqlite3_config(SQLITE_CONFIG_LOG, fossil_sqlite_log, 0);
  memset(&g, 0, sizeof(g));
  g.now = time(0);
  g.httpHeader = empty_blob;
      zReturn = g.argv[i+hasArg];
      remove_from_argv(i, 1+hasArg);
      break;
    }
  }
  return zReturn;
}



































/*
** Look for a repository command-line option.  If present, [re-]cache it in
** the global state and return the new pointer, freeing any previous value.
** If absent and there is no cached value, return NULL.
*/
const char *find_repository_option(){






      zReturn = g.argv[i+hasArg];
      remove_from_argv(i, 1+hasArg);
      break;
    }
  }
  return zReturn;
}

/*
** Look for multiple occurrences of a command-line option with the
** corresponding argument.
**
** Return a malloc allocated array of pointers to the arguments.
**
** pnUsedArgs is used to store the number of matched arguments.
**
** Caller is responsible to free allocated memory.
*/
const char **find_repeatable_option(
  const char *zLong,
  const char *zShort,
  int *pnUsedArgs
){
  const char *zOption;
  const char **pzArgs = 0;
  int nAllocArgs = 0;
  int nUsedArgs = 0;

  while( (zOption = find_option(zLong, zShort, 1))!=0 ){
    if( pzArgs==0 && nAllocArgs==0 ){
      nAllocArgs = 1;
      pzArgs = fossil_malloc( nAllocArgs*sizeof(pzArgs[0]) );
    }else if( nAllocArgs<=nUsedArgs ){
      nAllocArgs = nAllocArgs*2;
      pzArgs = fossil_realloc( (void *)pzArgs, nAllocArgs*sizeof(pzArgs[0]) );
    }
    pzArgs[nUsedArgs++] = zOption;
  }
  *pnUsedArgs = nUsedArgs;
  return pzArgs;
}
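
find_repeatable_option() is new in this merge; the entries it returns point into g.argv, so only the returned array itself needs to be released, as the comment above requires. A short usage sketch (the "--tag" option name and the helper function are hypothetical, not part of the check-in):

  /* Sketch only: collect every --tag NAME given on the command line. */
  static void collect_tags_sketch(void){
    int nTag = 0;
    const char **azTag = find_repeatable_option("tag", 0, &nTag);
    int i;
    for(i=0; i<nTag; i++){
      fossil_print("tag #%d: %s\n", i+1, azTag[i]);
    }
    fossil_free((void*)azTag);  /* free the array; the strings stay in g.argv */
  }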

/*
** Look for a repository command-line option.  If present, [re-]cache it in
** the global state and return the new pointer, freeing any previous value.
** If absent and there is no cached value, return NULL.
*/
const char *find_repository_option(){
** This function returns a human readable version string.
*/
const char *get_version(){
  static const char version[] = RELEASE_VERSION " " MANIFEST_VERSION " "
                                MANIFEST_DATE " UTC";
  return version;
}










































































/*
** This function returns the user-agent string for Fossil, for
** use in HTTP(S) requests.
*/
const char *get_user_agent(){
  static const char version[] = "Fossil/" RELEASE_VERSION " (" MANIFEST_DATE
                                " " MANIFEST_VERSION ")";
  return version;
}


/*
** COMMAND: version
**
** Usage: %fossil version ?-verbose|-v?
**
** Print the source code version number for the fossil executable.
** If the verbose option is specified, additional details will
** be output about what optional features this binary was compiled
** with
*/
void version_cmd(void){
  int verboseFlag = 0;

  fossil_print("This is fossil version %s\n", get_version());
  verboseFlag = find_option("verbose","v",0)!=0;

  /* We should be done with options.. */
  verify_all_options();



  if(!verboseFlag){
    return;
  }else{
#if defined(FOSSIL_ENABLE_TCL)
    int rc;
    const char *zRc;
#endif
    fossil_print("Compiled on %s %s using %s (%d-bit)\n",
                 __DATE__, __TIME__, COMPILER_NAME, sizeof(void*)*8);

    fossil_print("SQLite %s %.30s\n", sqlite3_libversion(), sqlite3_sourceid());

    fossil_print("Schema version %s\n", AUX_SCHEMA_MAX);



#if defined(FOSSIL_ENABLE_MINIZ)

    fossil_print("miniz %s, loaded %s\n", MZ_VERSION, mz_version());
#else
    fossil_print("zlib %s, loaded %s\n", ZLIB_VERSION, zlibVersion());
#endif
#if defined(FOSSIL_ENABLE_SSL)
    fossil_print("SSL (%s)\n", SSLeay_version(SSLEAY_VERSION));
#endif
#if defined(FOSSIL_ENABLE_LEGACY_MV_RM)
    fossil_print("LEGACY_MV_RM\n");
#endif
#if defined(FOSSIL_ENABLE_TH1_DOCS)
    fossil_print("TH1_DOCS\n");
#endif
#if defined(FOSSIL_ENABLE_TH1_HOOKS)
    fossil_print("TH1_HOOKS\n");
#endif
#if defined(FOSSIL_ENABLE_TCL)
    Th_FossilInit(TH_INIT_DEFAULT | TH_INIT_FORCE_TCL);
    rc = Th_Eval(g.interp, 0, "tclInvoke info patchlevel", -1);
    zRc = Th_ReturnCodeName(rc, 0);
    fossil_print("TCL (Tcl %s, loaded %s: %s)\n",
      TCL_PATCH_LEVEL, zRc, Th_GetResult(g.interp, 0)
    );
#endif
#if defined(USE_TCL_STUBS)
    fossil_print("USE_TCL_STUBS\n");
#endif
#if defined(FOSSIL_ENABLE_TCL_STUBS)
    fossil_print("TCL_STUBS\n");
#endif
#if defined(FOSSIL_ENABLE_TCL_PRIVATE_STUBS)
    fossil_print("TCL_PRIVATE_STUBS\n");
#endif
#if defined(FOSSIL_ENABLE_JSON)
    fossil_print("JSON (API %s)\n", FOSSIL_JSON_API_VERSION);
#endif
#if defined(BROKEN_MINGW_CMDLINE)
    fossil_print("MBCS_COMMAND_LINE\n");
#else
    fossil_print("UNICODE_COMMAND_LINE\n");
#endif
#if defined(FOSSIL_DYNAMIC_BUILD)
    fossil_print("DYNAMIC_BUILD\n");
#else
    fossil_print("STATIC_BUILD\n");
#endif
  }









}


/*
** COMMAND: help
**
** Usage: %fossil help COMMAND
**    or: %fossil COMMAND --help
**
** Display information on how to use COMMAND.  To display a list of
** available commands one of:
**
**    %fossil help              Show common commands
**    %fossil help -a|--all     Show both common and auxiliary commands
**    %fossil help -t|--test    Show test commands only
**    %fossil help -x|--aux     Show auxiliary commands only
**    %fossil help -w|--www     Show list of WWW pages
*/






** This function returns a human readable version string.
*/
const char *get_version(){
  static const char version[] = RELEASE_VERSION " " MANIFEST_VERSION " "
                                MANIFEST_DATE " UTC";
  return version;
}

/*
** This function populates a blob with version information.  It is used by
** the "version" command and "test-version" web page.  It assumes the blob
** passed to it is uninitialized; otherwise, it will leak memory.
*/
static void get_version_blob(
  Blob *pOut,                 /* Write the manifest here */
  int bVerbose                /* Non-zero for full information. */
){
#if defined(FOSSIL_ENABLE_TCL)
  int rc;
  const char *zRc;
#endif
  blob_zero(pOut);
  blob_appendf(pOut, "This is fossil version %s\n", get_version());
  if( !bVerbose ) return;
  blob_appendf(pOut, "Compiled on %s %s using %s (%d-bit)\n",
               __DATE__, __TIME__, COMPILER_NAME, sizeof(void*)*8);
  blob_appendf(pOut, "SQLite %s %.30s\n", sqlite3_libversion(),
               sqlite3_sourceid());
  blob_appendf(pOut, "Schema version %s\n", AUX_SCHEMA_MAX);
#if defined(FOSSIL_ENABLE_MINIZ)
  blob_appendf(pOut, "miniz %s, loaded %s\n", MZ_VERSION, mz_version());
#else
  blob_appendf(pOut, "zlib %s, loaded %s\n", ZLIB_VERSION, zlibVersion());
#endif
#if defined(FOSSIL_ENABLE_SSL)
  blob_appendf(pOut, "SSL (%s)\n", SSLeay_version(SSLEAY_VERSION));
#endif
#if defined(FOSSIL_ENABLE_LEGACY_MV_RM)
  blob_append(pOut, "LEGACY_MV_RM\n", -1);
#endif
#if defined(FOSSIL_ENABLE_EXEC_REL_PATHS)
  blob_append(pOut, "EXEC_REL_PATHS\n", -1);
#endif
#if defined(FOSSIL_ENABLE_TH1_DOCS)
  blob_append(pOut, "TH1_DOCS\n", -1);
#endif
#if defined(FOSSIL_ENABLE_TH1_HOOKS)
  blob_append(pOut, "TH1_HOOKS\n", -1);
#endif
#if defined(FOSSIL_ENABLE_TCL)
  Th_FossilInit(TH_INIT_DEFAULT | TH_INIT_FORCE_TCL);
  rc = Th_Eval(g.interp, 0, "tclInvoke info patchlevel", -1);
  zRc = Th_ReturnCodeName(rc, 0);
  blob_appendf(pOut, "TCL (Tcl %s, loaded %s: %s)\n",
    TCL_PATCH_LEVEL, zRc, Th_GetResult(g.interp, 0)
  );
#endif
#if defined(USE_TCL_STUBS)
  blob_append(pOut, "USE_TCL_STUBS\n", -1);
#endif
#if defined(FOSSIL_ENABLE_TCL_STUBS)
  blob_append(pOut, "TCL_STUBS\n", -1);
#endif
#if defined(FOSSIL_ENABLE_TCL_PRIVATE_STUBS)
  blob_append(pOut, "TCL_PRIVATE_STUBS\n", -1);
#endif
#if defined(FOSSIL_ENABLE_JSON)
  blob_appendf(pOut, "JSON (API %s)\n", FOSSIL_JSON_API_VERSION);
#endif
#if defined(BROKEN_MINGW_CMDLINE)
  blob_append(pOut, "MBCS_COMMAND_LINE\n", -1);
#else
  blob_append(pOut, "UNICODE_COMMAND_LINE\n", -1);
#endif
#if defined(FOSSIL_DYNAMIC_BUILD)
  blob_append(pOut, "DYNAMIC_BUILD\n", -1);
#else
  blob_append(pOut, "STATIC_BUILD\n", -1);
#endif
}

/*
** This function returns the user-agent string for Fossil, for
** use in HTTP(S) requests.
*/
const char *get_user_agent(){
  static const char version[] = "Fossil/" RELEASE_VERSION " (" MANIFEST_DATE
                                " " MANIFEST_VERSION ")";
  return version;
}


/*
** COMMAND: version
**
** Usage: %fossil version ?-verbose|-v?
**
** Print the source code version number for the fossil executable.
** If the verbose option is specified, additional details will
** be output about what optional features this binary was compiled
** with
*/
void version_cmd(void){

  Blob versionInfo;

  int verboseFlag = find_option("verbose","v",0)!=0;

  /* We should be done with options.. */
  verify_all_options();
  get_version_blob(&versionInfo, verboseFlag);
  fossil_print("%s", blob_str(&versionInfo));
}
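
version_cmd() can leave the Blob allocated because the process exits right away; a caller that keeps running would pair get_version_blob() with blob_reset(), the standard Blob cleanup already used elsewhere in this diff. A small sketch under that assumption (the helper name is illustrative, not part of the check-in):

  /* Sketch only: get_version_blob() wants an uninitialized Blob on entry,
  ** so release its contents once the text has been used. */
  static void log_version_sketch(void){
    Blob versionInfo;
    get_version_blob(&versionInfo, 0);          /* 0 = skip verbose details */
    fossil_print("%s", blob_str(&versionInfo)); /* same pattern as version_cmd() */
    blob_reset(&versionInfo);                   /* free the accumulated text */
  }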









/*
** WEBPAGE: test-version
**
** Show the version information for Fossil.
**
** Query parameters:
**
**    verbose       Show all available details.
*/
void test_version_page(void){




  Blob versionInfo;





  int verboseFlag;



































  login_check_credentials();
  if( !g.perm.Read ){ login_needed(g.anon.Read); return; }
  verboseFlag = P("verbose")!=0;
  style_header("Version Information");
  get_version_blob(&versionInfo, verboseFlag);
  @ <blockquote><pre>
  @ %h(blob_str(&versionInfo))
  @ </pre></blockquote>
  style_footer();
}


/*
** COMMAND: help
**
** Usage: %fossil help COMMAND
**    or: %fossil COMMAND --help
**
** Display information on how to use COMMAND.  To display a list of
** available commands use one of:
**
**    %fossil help              Show common commands
**    %fossil help -a|--all     Show both common and auxiliary commands
**    %fossil help -t|--test    Show test commands only
**    %fossil help -x|--aux     Show auxiliary commands only
**    %fossil help -w|--www     Show list of WWW pages
*/
  const char *zMode;
  const char *zCur;

  if( g.zBaseURL!=0 ) return;
  if( zAltBase ){
    int i, n, c;
    g.zTop = g.zBaseURL = mprintf("%s", zAltBase);
    if( memcmp(g.zTop, "http://", 7)!=0 && memcmp(g.zTop,"https://",8)!=0 ){






      fossil_fatal("argument to --baseurl should be 'http://host/path'"
                   " or 'https://host/path'");
    }
    for(i=n=0; (c = g.zTop[i])!=0; i++){
      if( c=='/' ){
        n++;
        if( n==3 ){






  const char *zMode;
  const char *zCur;

  if( g.zBaseURL!=0 ) return;
  if( zAltBase ){
    int i, n, c;
    g.zTop = g.zBaseURL = mprintf("%s", zAltBase);
    if( strncmp(g.zTop, "http://", 7)==0 ){
      /* it is HTTP, replace prefix with HTTPS. */
      g.zHttpsURL = mprintf("https://%s", &g.zTop[7]);
    }else if( strncmp(g.zTop, "https://", 8)==0 ){
      /* it is already HTTPS, use it. */
      g.zHttpsURL = mprintf("%s", g.zTop);
    }else{
      fossil_fatal("argument to --baseurl should be 'http://host/path'"
                   " or 'https://host/path'");
    }
    for(i=n=0; (c = g.zTop[i])!=0; i++){
      if( c=='/' ){
        n++;
        if( n==3 ){
** zRepo might be a directory itself.  In that case chroot into
** the directory zRepo.
**
** Assume the user-id and group-id of the repository, or if zRepo
** is a directory, of that directory.
**
** The noJail flag means that the chroot jail is not entered.  But
** privileges are still lowered to that of the the user-id and group-id.

*/
static char *enter_chroot_jail(char *zRepo, int noJail){
#if !defined(_WIN32)
  if( getuid()==0 ){
    int i;
    struct stat sStat;
    Blob dir;






** zRepo might be a directory itself.  In that case chroot into
** the directory zRepo.
**
** Assume the user-id and group-id of the repository, or if zRepo
** is a directory, of that directory.
**
** The noJail flag means that the chroot jail is not entered.  But
** privileges are still lowered to that of the user-id and group-id
** of the repository file.
*/
static char *enter_chroot_jail(char *zRepo, int noJail){
#if !defined(_WIN32)
  if( getuid()==0 ){
    int i;
    struct stat sStat;
    Blob dir;
** If the repository is known, it has already been opened.  If unknown,
** then g.zRepositoryName holds the directory that contains the repository
** and the actual repository is taken from the first element of PATH_INFO.
**
** Process the webpage specified by the PATH_INFO or REQUEST_URI
** environment variable.
**
** If the repository is not known, the a search is done through the
** file hierarchy rooted at g.zRepositoryName for a suitable repository
** with a name of $prefix.fossil, where $prefix is any prefix of PATH_INFO.
** Or, if an ordinary file named $prefix is found, and $prefix matches
** pFileGlob and $prefix does not match "*.fossil*" and the mimetype of
** $prefix can be determined from its suffix, then the file $prefix is
** returned as static text.
**
** If no suitable webpage is found, try to redirect to zNotFound.
*/
static void process_one_web_page(
  const char *zNotFound,      /* Redirect here on a 404 if not NULL */
  Glob *pFileGlob,            /* Deliver static files matching */
  int allowRepoList           /* Send repo list for "/" URL */
){
  const char *zPathInfo;
  char *zPath = NULL;
  int idx;
  int i;








  /* If the repository has not been opened already, then find the
  ** repository based on the first element of PATH_INFO and open it.
  */
  zPathInfo = PD("PATH_INFO","");
  if( !g.repositoryOpen ){
    char *zRepo, *zToFree;






** If the repository is known, it has already been opened.  If unknown,
** then g.zRepositoryName holds the directory that contains the repository
** and the actual repository is taken from the first element of PATH_INFO.
**
** Process the webpage specified by the PATH_INFO or REQUEST_URI
** environment variable.
**
** If the repository is not known, then a search is done through the
** file hierarchy rooted at g.zRepositoryName for a suitable repository
** with a name of $prefix.fossil, where $prefix is any prefix of PATH_INFO.
** Or, if an ordinary file named $prefix is found, and $prefix matches
** pFileGlob and $prefix does not match "*.fossil*" and the mimetype of
** $prefix can be determined from its suffix, then the file $prefix is
** returned as static text.
**
** If no suitable webpage is found, try to redirect to zNotFound.
*/
static void process_one_web_page(
  const char *zNotFound,      /* Redirect here on a 404 if not NULL */
  Glob *pFileGlob,            /* Deliver static files matching */
  int allowRepoList           /* Send repo list for "/" URL */
){
  const char *zPathInfo;
  char *zPath = NULL;
  int idx;
  int i;

  /* Handle universal query parameters */
  if( PB("utc") ){
    g.fTimeFormat = 1;
  }else if( PB("localtime") ){
    g.fTimeFormat = 2;
  }

  /* If the repository has not been opened already, then find the
  ** repository based on the first element of PATH_INFO and open it.
  */
  zPathInfo = PD("PATH_INFO","");
  if( !g.repositoryOpen ){
    char *zRepo, *zToFree;
}

/*
** If g.argv[arg] exists then it is either the name of a repository
** that will be used by a server, or else it is a directory that
** contains multiple repositories that can be served.  If g.argv[arg]
** is a directory, the repositories it contains must be named
** "*.fossil".  If g.argv[arg] does not exists, then we must be within
** a check-out and the repository to be served is the repository of
** that check-out.
**
** Open the repository to be served if it is known.  If g.argv[arg] is
** a directory full of repositories, then set g.zRepositoryName to
** the name of that directory and the specific repository will be
** opened later by process_one_web_page() based on the content of
** the PATH_INFO variable.






}

/*
** If g.argv[arg] exists then it is either the name of a repository
** that will be used by a server, or else it is a directory that
** contains multiple repositories that can be served.  If g.argv[arg]
** is a directory, the repositories it contains must be named
** "*.fossil".  If g.argv[arg] does not exist, then we must be within
** an open check-out and the repository serve is the repository of
** that check-out.
**
** Open the repository to be served if it is known.  If g.argv[arg] is
** a directory full of repositories, then set g.zRepositoryName to
** the name of that directory and the specific repository will be
** opened later by process_one_web_page() based on the content of
** the PATH_INFO variable.
}

/*
** undocumented format:
**
**        fossil http INFILE OUTFILE IPADDR ?REPOSITORY?
**
** The argv==6 form is used by the win32 server only.
**
** COMMAND: http*
**
** Usage: %fossil http ?REPOSITORY? ?OPTIONS?
**
** Handle a single HTTP request appearing on stdin.  The resulting webpage
** is delivered on stdout.  This method is used to launch an HTTP request
** handler from inetd, for example.  The argument is the name of the
** repository.
**
** If REPOSITORY is a directory that contains one or more repositories,
** either directly in REPOSITORY itself, or in subdirectories, and
** with names of the form "*.fossil" then the a prefix of the URL pathname
** selects from among the various repositories.  If the pathname does
** not select a valid repository and the --notfound option is available,
** then the server redirects (HTTP code 302) to the URL of --notfound.
** When REPOSITORY is a directory, the pathname must contain only
** alphanumerics, "_", "/", "-" and "." and no "-" may occur after a "/"
** and every "." must be surrounded on both sides by alphanumerics or else
** a 404 error is returned.  Static content files in the directory are






}

/*
** undocumented format:
**
**        fossil http INFILE OUTFILE IPADDR ?REPOSITORY?
**
** The argv==6 form (with no options) is used by the win32 server only.
**
** COMMAND: http*
**
** Usage: %fossil http ?REPOSITORY? ?OPTIONS?
**
** Handle a single HTTP request appearing on stdin.  The resulting webpage
** is delivered on stdout.  This method is used to launch an HTTP request
** handler from inetd, for example.  The argument is the name of the
** repository.
**
** If REPOSITORY is a directory that contains one or more repositories,
** either directly in REPOSITORY itself or in subdirectories, and
** with names of the form "*.fossil" then a prefix of the URL pathname
** selects from among the various repositories.  If the pathname does
** not select a valid repository and the --notfound option is available,
** then the server redirects (HTTP code 302) to the URL of --notfound.
** When REPOSITORY is a directory, the pathname must contain only
** alphanumerics, "_", "/", "-" and "." and no "-" may occur after a "/"
** and every "." must be surrounded on both sides by alphanumerics or else
** a 404 error is returned.  Static content files in the directory are
          g.fSshClient & CGI_SSH_COMPAT );
}

/*
** Note that the following command is used by ssh:// processing.
**
** COMMAND: test-http

** Works like the http command but gives setup permission to all users.
**



*/
void cmd_test_http(void){
  const char *zIpAddr;    /* IP address of remote client */

  Th_InitTraceLog();
  login_set_capabilities("sx", 0);
  g.useLocalauth = 1;






          g.fSshClient & CGI_SSH_COMPAT );
}

/*
** Note that the following command is used by ssh:// processing.
**
** COMMAND: test-http
**
** Works like the http command but gives setup permission to all users.
**
** Options:
**   --th-trace          trace TH1 execution (for debugging purposes)
**
*/
void cmd_test_http(void){
  const char *zIpAddr;    /* IP address of remote client */

  Th_InitTraceLog();
  login_set_capabilities("sx", 0);
  g.useLocalauth = 1;
** --localauth option is present and the "localauth" setting is off and the
** connection is from localhost.  The "ui" command also enables --repolist
** by default.
**
** Options:
**   --baseurl URL       Use URL as the base (useful for reverse proxies)
**   --create            Create a new REPOSITORY if it does not already exist

**   --files GLOBLIST    Comma-separated list of glob patterns for static files
**   --localauth         enable automatic login for requests from localhost
**   --localhost         listen on 127.0.0.1 only (always true for "ui")

**   --nojail            Drop root privileges but do not enter the chroot jail

**   --notfound URL      Redirect
**   -P|--port TCPPORT   listen to request on port TCPPORT
**   --th-trace          trace TH1 execution (for debugging purposes)
**   --repolist          If REPOSITORY is dir, URL "/" lists repos.
**   --scgi              Accept SCGI rather than HTTP
**   --skin LABEL        Use override skin LABEL







** --localauth option is present and the "localauth" setting is off and the
** connection is from localhost.  The "ui" command also enables --repolist
** by default.
**
** Options:
**   --baseurl URL       Use URL as the base (useful for reverse proxies)
**   --create            Create a new REPOSITORY if it does not already exist
**   --page PAGE         Start "ui" on PAGE.  ex: --page "timeline?y=ci"
**   --files GLOBLIST    Comma-separated list of glob patterns for static files
**   --localauth         enable automatic login for requests from localhost
**   --localhost         listen on 127.0.0.1 only (always true for "ui")
**   --https             signal a request coming in via https
**   --nojail            Drop root privileges but do not enter the chroot jail
**   --nossl             signal that no SSL connections are available
**   --notfound URL      Redirect
**   -P|--port TCPPORT   listen to request on port TCPPORT
**   --th-trace          trace TH1 execution (for debugging purposes)
**   --repolist          If REPOSITORY is dir, URL "/" lists repos.
**   --scgi              Accept SCGI rather than HTTP
**   --skin LABEL        Use override skin LABEL

  char *zBrowserCmd = 0;    /* Command to launch the web browser */
  int isUiCmd;              /* True if command is "ui", not "server' */
  const char *zNotFound;    /* The --notfound option or NULL */
  int flags = 0;            /* Server flags */
#if !defined(_WIN32)
  int noJail;               /* Do not enter the chroot jail */
#endif
  int allowRepoList;        /* List repositories on URL "/" */
  const char *zAltBase;     /* Argument to the --baseurl option */
  const char *zFileGlob;    /* Static content must match this */
  char *zIpAddr = 0;        /* Bind to this IP address */
  int fCreate = 0;


#if defined(_WIN32)
  const char *zStopperFile;    /* Name of file used to terminate server */
  zStopperFile = find_option("stopper", 0, 1);
#endif

  zFileGlob = find_option("files-urlenc",0,1);






  char *zBrowserCmd = 0;    /* Command to launch the web browser */
  int isUiCmd;              /* True if command is "ui", not "server' */
  const char *zNotFound;    /* The --notfound option or NULL */
  int flags = 0;            /* Server flags */
#if !defined(_WIN32)
  int noJail;               /* Do not enter the chroot jail */
#endif
  int allowRepoList;         /* List repositories on URL "/" */
  const char *zAltBase;      /* Argument to the --baseurl option */
  const char *zFileGlob;     /* Static content must match this */
  char *zIpAddr = 0;         /* Bind to this IP address */
  int fCreate = 0;           /* The --create flag */
  const char *zInitPage = 0; /* Start on this page.  --page option */

#if defined(_WIN32)
  const char *zStopperFile;    /* Name of file used to terminate server */
  zStopperFile = find_option("stopper", 0, 1);
#endif

  zFileGlob = find_option("files-urlenc",0,1);
  skin_override();
#if !defined(_WIN32)
  noJail = find_option("nojail",0,0)!=0;
#endif
  g.useLocalauth = find_option("localauth", 0, 0)!=0;
  Th_InitTraceLog();
  zPort = find_option("port", "P", 1);





  zNotFound = find_option("notfound", 0, 1);
  allowRepoList = find_option("repolist",0,0)!=0;
  zAltBase = find_option("baseurl", 0, 1);
  fCreate = find_option("create",0,0)!=0;
  if( find_option("scgi", 0, 0)!=0 ) flags |= HTTP_SERVER_SCGI;
  if( zAltBase ){
    set_base_url(zAltBase);







  }
  if( find_option("localhost", 0, 0)!=0 ){
    flags |= HTTP_SERVER_LOCALHOST;
  }

  /* We should be done with options.. */
  verify_all_options();

  if( g.argc!=2 && g.argc!=3 ) usage("?REPOSITORY?");
  isUiCmd = g.argv[1][0]=='u';
  if( isUiCmd ){
    flags |= HTTP_SERVER_LOCALHOST|HTTP_SERVER_REPOLIST;
    g.useLocalauth = 1;
    allowRepoList = 1;
  }
  find_server_repository(2, fCreate);
  if( zPort ){






  skin_override();
#if !defined(_WIN32)
  noJail = find_option("nojail",0,0)!=0;
#endif
  g.useLocalauth = find_option("localauth", 0, 0)!=0;
  Th_InitTraceLog();
  zPort = find_option("port", "P", 1);
  isUiCmd = g.argv[1][0]=='u';
  if( isUiCmd ){
    zInitPage = find_option("page", 0, 1);
  }
  if( zInitPage==0 ) zInitPage = "";
  zNotFound = find_option("notfound", 0, 1);
  allowRepoList = find_option("repolist",0,0)!=0;
  zAltBase = find_option("baseurl", 0, 1);
  fCreate = find_option("create",0,0)!=0;
  if( find_option("scgi", 0, 0)!=0 ) flags |= HTTP_SERVER_SCGI;
  if( zAltBase ){
    set_base_url(zAltBase);
  }
  g.sslNotAvailable = find_option("nossl", 0, 0)!=0;
  if( find_option("https",0,0)!=0 ){
    cgi_replace_parameter("HTTPS","on");
  }else{
    /* without --https, defaults to not available. */
    g.sslNotAvailable = 1;
  }
  if( find_option("localhost", 0, 0)!=0 ){
    flags |= HTTP_SERVER_LOCALHOST;
  }

  /* We should be done with options.. */
  verify_all_options();

  if( g.argc!=2 && g.argc!=3 ) usage("?REPOSITORY?");

  if( isUiCmd ){
    flags |= HTTP_SERVER_LOCALHOST|HTTP_SERVER_REPOLIST;
    g.useLocalauth = 1;
    allowRepoList = 1;
  }
  find_server_repository(2, fCreate);
  if( zPort ){
        }
      }
    }
#else
    zBrowser = db_get("web-browser", "open");
#endif
    if( zIpAddr ){
      zBrowserCmd = mprintf("%s http://%s:%%d/ &", zBrowser, zIpAddr);

    }else{
      zBrowserCmd = mprintf("%s http://localhost:%%d/ &", zBrowser);

    }

    if( g.repositoryOpen ) flags |= HTTP_SERVER_HAD_REPOSITORY;
    if( g.localOpen ) flags |= HTTP_SERVER_HAD_CHECKOUT;
  }
  db_close(1);
  if( cgi_http_server(iPort, mxPort, zBrowserCmd, zIpAddr, flags) ){
    fossil_fatal("unable to listen on TCP socket %d", iPort);
  }
  g.sslNotAvailable = 1;
  g.httpIn = stdin;
  g.httpOut = stdout;
  if( g.fHttpTrace || g.fSqlTrace ){
    fprintf(stderr, "====== SERVER pid %d =======\n", getpid());
  }
  g.cgiOutput = 1;
  find_server_repository(2, 0);
  g.zRepositoryName = enter_chroot_jail(g.zRepositoryName, noJail);
  if( flags & HTTP_SERVER_SCGI ){
    cgi_handle_scgi_request();
  }else{
    cgi_handle_http_request(0);
  }
  process_one_web_page(zNotFound, glob_create(zFileGlob), allowRepoList);
#else
  /* Win32 implementation */
  if( isUiCmd ){
    zBrowser = db_get("web-browser", "start");
    if( zIpAddr ){
      zBrowserCmd = mprintf("%s http://%s:%%d/ &", zBrowser, zIpAddr);

    }else{
      zBrowserCmd = mprintf("%s http://localhost:%%d/ &", zBrowser);

    }

    if( g.repositoryOpen ) flags |= HTTP_SERVER_HAD_REPOSITORY;
    if( g.localOpen ) flags |= HTTP_SERVER_HAD_CHECKOUT;
  }
  db_close(1);
  if( allowRepoList ){
    flags |= HTTP_SERVER_REPOLIST;
  }
  if( win32_http_service(iPort, zNotFound, zFileGlob, flags) ){
    win32_http_server(iPort, mxPort, zBrowserCmd,
                      zStopperFile, zNotFound, zFileGlob, zIpAddr, flags);
  }
#endif
}

/*
** COMMAND:  test-echo
**
** Usage:  %fossil test-echo [--hex] ARGS...
**
** Echo all command-line arguments (enclosed in [...]) to the screen so that
** wildcard expansion behavior of the host shell can be investigated.
**
** With the --hex option, show the output as hexadecimal.  This can be used
** to verify the fossil_filename_to_utf8() routine on Windows and Mac.
*/
void test_echo_cmd(void){
  int i, j;
  if( find_option("hex",0,0)==0 ){
    fossil_print("g.nameOfExe = [%s]\n", g.nameOfExe);
    for(i=0; i<g.argc; i++){
      fossil_print("argv[%d] = [%s]\n", i, g.argv[i]);






        }
      }
    }
#else
    zBrowser = db_get("web-browser", "open");
#endif
    if( zIpAddr ){
      zBrowserCmd = mprintf("%s http://%s:%%d/%s &",
                            zBrowser, zIpAddr, zInitPage);
    }else{
      zBrowserCmd = mprintf("%s http://localhost:%%d/%s &",
                            zBrowser, zInitPage);
    }
  }
  if( g.repositoryOpen ) flags |= HTTP_SERVER_HAD_REPOSITORY;
  if( g.localOpen ) flags |= HTTP_SERVER_HAD_CHECKOUT;

  db_close(1);
  if( cgi_http_server(iPort, mxPort, zBrowserCmd, zIpAddr, flags) ){
    fossil_fatal("unable to listen on TCP socket %d", iPort);
  }

  g.httpIn = stdin;
  g.httpOut = stdout;
  if( g.fHttpTrace || g.fSqlTrace ){
    fprintf(stderr, "====== SERVER pid %d =======\n", getpid());
  }
  g.cgiOutput = 1;
  find_server_repository(2, 0);
  g.zRepositoryName = enter_chroot_jail(g.zRepositoryName, noJail);
  if( flags & HTTP_SERVER_SCGI ){
    cgi_handle_scgi_request();
  }else{
    cgi_handle_http_request(0);
  }
  process_one_web_page(zNotFound, glob_create(zFileGlob), allowRepoList);
#else
  /* Win32 implementation */
  if( isUiCmd ){
    zBrowser = db_get("web-browser", "start");
    if( zIpAddr ){
      zBrowserCmd = mprintf("%s http://%s:%%d/%s &",
                            zBrowser, zIpAddr, zInitPage);
    }else{
      zBrowserCmd = mprintf("%s http://localhost:%%d/%s &",
                            zBrowser, zInitPage);
    }
  }
  if( g.repositoryOpen ) flags |= HTTP_SERVER_HAD_REPOSITORY;
  if( g.localOpen ) flags |= HTTP_SERVER_HAD_CHECKOUT;

  db_close(1);
  if( allowRepoList ){
    flags |= HTTP_SERVER_REPOLIST;
  }
  if( win32_http_service(iPort, zNotFound, zFileGlob, flags) ){
    win32_http_server(iPort, mxPort, zBrowserCmd,
                      zStopperFile, zNotFound, zFileGlob, zIpAddr, flags);
  }
#endif
}

/*
** COMMAND:  test-echo
**
** Usage:  %fossil test-echo [--hex] ARGS...
**
** Echo all command-line arguments (enclosed in [...]) to the screen so that
** wildcard expansion behavior of the host shell can be investigated.
**
** With the --hex option, show the output as hexadecimal.  This can be used
** to verify the fossil_path_to_utf8() routine on Windows and Mac.
*/
void test_echo_cmd(void){
  int i, j;
  if( find_option("hex",0,0)==0 ){
    fossil_print("g.nameOfExe = [%s]\n", g.nameOfExe);
    for(i=0; i<g.argc; i++){
      fossil_print("argv[%d] = [%s]\n", i, g.argv[i]);
Changes to src/main.mk.
                 -DSQLITE_ENABLE_LOCKING_STYLE=0 \
                 -DSQLITE_THREADSAFE=0 \
                 -DSQLITE_DEFAULT_FILE_FORMAT=4 \
                 -DSQLITE_OMIT_DEPRECATED \
                 -DSQLITE_ENABLE_EXPLAIN_COMMENTS \
                 -DSQLITE_ENABLE_FTS4 \
                 -DSQLITE_ENABLE_FTS3_PARENTHESIS \
                 -DSQLITE_ENABLE_DBSTAT_VTAB



# Setup the options used to compile the included SQLite shell.
SHELL_OPTIONS = -Dmain=sqlite3_shell \
                -DSQLITE_OMIT_LOAD_EXTENSION=1 \
                -DUSE_SYSTEM_SQLITE=$(USE_SYSTEM_SQLITE) \
                -DSQLITE_SHELL_DBNAME_PROC=fossil_open







                 -DSQLITE_ENABLE_LOCKING_STYLE=0 \
                 -DSQLITE_THREADSAFE=0 \
                 -DSQLITE_DEFAULT_FILE_FORMAT=4 \
                 -DSQLITE_OMIT_DEPRECATED \
                 -DSQLITE_ENABLE_EXPLAIN_COMMENTS \
                 -DSQLITE_ENABLE_FTS4 \
                 -DSQLITE_ENABLE_FTS3_PARENTHESIS \
                 -DSQLITE_ENABLE_DBSTAT_VTAB \
                 -DSQLITE_ENABLE_JSON1 \
                 -DSQLITE_ENABLE_FTS5

# Setup the options used to compile the included SQLite shell.
SHELL_OPTIONS = -Dmain=sqlite3_shell \
                -DSQLITE_OMIT_LOAD_EXTENSION=1 \
                -DUSE_SYSTEM_SQLITE=$(USE_SYSTEM_SQLITE) \
                -DSQLITE_SHELL_DBNAME_PROC=fossil_open

# The FOSSIL_ENABLE_MINIZ variable may be undefined, set to 0, or
# set to 1.  If it is set to 1, the miniz library included in the
# source tree should be used; otherwise, it should not.
MINIZ_OBJ.0 =
MINIZ_OBJ.1 = $(OBJDIR)/miniz.o
MINIZ_OBJ.  = $(MINIZ_OBJ.0)












EXTRAOBJ = \
 $(SQLITE3_OBJ.$(USE_SYSTEM_SQLITE)) \
 $(MINIZ_OBJ.$(FOSSIL_ENABLE_MINIZ)) \
 $(OBJDIR)/linenoise.o \
 $(OBJDIR)/shell.o \
 $(OBJDIR)/th.o \
 $(OBJDIR)/th_lang.o \
 $(OBJDIR)/th_tcl.o \
 $(OBJDIR)/cson_amalgamation.o








# The FOSSIL_ENABLE_MINIZ variable may be undefined, set to 0, or
# set to 1.  If it is set to 1, the miniz library included in the
# source tree should be used; otherwise, it should not.
MINIZ_OBJ.0 =
MINIZ_OBJ.1 = $(OBJDIR)/miniz.o
MINIZ_OBJ.  = $(MINIZ_OBJ.0)

# The USE_LINENOISE variable may be undefined, set to 0, or set
# to 1. If it is set to 0, then there is no need to build or link
# the linenoise.o object.
LINENOISE_DEF.0 =
LINENOISE_DEF.1 = -DHAVE_LINENOISE
LINENOISE_DEF.  = $(LINENOISE_DEF.0)
LINENOISE_OBJ.0 =
LINENOISE_OBJ.1 = $(OBJDIR)/linenoise.o
LINENOISE_OBJ.  = $(LINENOISE_OBJ.0)


EXTRAOBJ = \
 $(SQLITE3_OBJ.$(USE_SYSTEM_SQLITE)) \
 $(MINIZ_OBJ.$(FOSSIL_ENABLE_MINIZ)) \
 $(LINENOISE_OBJ.$(USE_LINENOISE)) \
 $(OBJDIR)/shell.o \
 $(OBJDIR)/th.o \
 $(OBJDIR)/th_lang.o \
 $(OBJDIR)/th_tcl.o \
 $(OBJDIR)/cson_amalgamation.o


$(OBJDIR)/zip.h:	$(OBJDIR)/headers

$(OBJDIR)/sqlite3.o:	$(SRCDIR)/sqlite3.c
	$(XTCC) $(SQLITE_OPTIONS) $(SQLITE_CFLAGS) -c $(SRCDIR)/sqlite3.c -o $@

$(OBJDIR)/shell.o:	$(SRCDIR)/shell.c $(SRCDIR)/sqlite3.h
	$(XTCC) $(SHELL_OPTIONS) $(SHELL_CFLAGS) -DHAVE_LINENOISE -c $(SRCDIR)/shell.c -o $@

$(OBJDIR)/linenoise.o:	$(SRCDIR)/linenoise.c $(SRCDIR)/linenoise.h
	$(XTCC) -c $(SRCDIR)/linenoise.c -o $@

$(OBJDIR)/th.o:	$(SRCDIR)/th.c
	$(XTCC) -c $(SRCDIR)/th.c -o $@







$(OBJDIR)/zip.h:	$(OBJDIR)/headers

$(OBJDIR)/sqlite3.o:	$(SRCDIR)/sqlite3.c
	$(XTCC) $(SQLITE_OPTIONS) $(SQLITE_CFLAGS) -c $(SRCDIR)/sqlite3.c -o $@

$(OBJDIR)/shell.o:	$(SRCDIR)/shell.c $(SRCDIR)/sqlite3.h
	$(XTCC) $(SHELL_OPTIONS) $(SHELL_CFLAGS) $(LINENOISE_DEF.$(USE_LINENOISE)) -c $(SRCDIR)/shell.c -o $@

$(OBJDIR)/linenoise.o:	$(SRCDIR)/linenoise.c $(SRCDIR)/linenoise.h
	$(XTCC) -c $(SRCDIR)/linenoise.c -o $@

$(OBJDIR)/th.o:	$(SRCDIR)/th.c
	$(XTCC) -c $(SRCDIR)/th.c -o $@

Changes to src/makemake.tcl.
  -DSQLITE_THREADSAFE=0
  -DSQLITE_DEFAULT_FILE_FORMAT=4
  -DSQLITE_OMIT_DEPRECATED
  -DSQLITE_ENABLE_EXPLAIN_COMMENTS
  -DSQLITE_ENABLE_FTS4
  -DSQLITE_ENABLE_FTS3_PARENTHESIS
  -DSQLITE_ENABLE_DBSTAT_VTAB


}
#lappend SQLITE_OPTIONS -DSQLITE_ENABLE_FTS3=1
#lappend SQLITE_OPTIONS -DSQLITE_ENABLE_STAT4
#lappend SQLITE_OPTIONS -DSQLITE_WIN32_NO_ANSI
#lappend SQLITE_OPTIONS -DSQLITE_WINNT_MAX_PATH_CHARS=4096

# Options used to compile the included SQLite shell.






  -DSQLITE_THREADSAFE=0
  -DSQLITE_DEFAULT_FILE_FORMAT=4
  -DSQLITE_OMIT_DEPRECATED
  -DSQLITE_ENABLE_EXPLAIN_COMMENTS
  -DSQLITE_ENABLE_FTS4
  -DSQLITE_ENABLE_FTS3_PARENTHESIS
  -DSQLITE_ENABLE_DBSTAT_VTAB
  -DSQLITE_ENABLE_JSON1
  -DSQLITE_ENABLE_FTS5
}
#lappend SQLITE_OPTIONS -DSQLITE_ENABLE_FTS3=1
#lappend SQLITE_OPTIONS -DSQLITE_ENABLE_STAT4
#lappend SQLITE_OPTIONS -DSQLITE_WIN32_NO_ANSI
#lappend SQLITE_OPTIONS -DSQLITE_WINNT_MAX_PATH_CHARS=4096

# Options used to compile the included SQLite shell.
# The FOSSIL_ENABLE_MINIZ variable may be undefined, set to 0, or
# set to 1.  If it is set to 1, the miniz library included in the
# source tree should be used; otherwise, it should not.
MINIZ_OBJ.0 =
MINIZ_OBJ.1 = $(OBJDIR)/miniz.o
MINIZ_OBJ.  = $(MINIZ_OBJ.0)










}]

writeln [string map [list <<<NEXT_LINE>>> \\] {
EXTRAOBJ = <<<NEXT_LINE>>>
 $(SQLITE3_OBJ.$(USE_SYSTEM_SQLITE)) <<<NEXT_LINE>>>
 $(MINIZ_OBJ.$(FOSSIL_ENABLE_MINIZ)) <<<NEXT_LINE>>>
 $(OBJDIR)/linenoise.o <<<NEXT_LINE>>>
 $(OBJDIR)/shell.o <<<NEXT_LINE>>>
 $(OBJDIR)/th.o <<<NEXT_LINE>>>
 $(OBJDIR)/th_lang.o <<<NEXT_LINE>>>
 $(OBJDIR)/th_tcl.o <<<NEXT_LINE>>>
 $(OBJDIR)/cson_amalgamation.o
}]







# The FOSSIL_ENABLE_MINIZ variable may be undefined, set to 0, or
# set to 1.  If it is set to 1, the miniz library included in the
# source tree should be used; otherwise, it should not.
MINIZ_OBJ.0 =
MINIZ_OBJ.1 = $(OBJDIR)/miniz.o
MINIZ_OBJ.  = $(MINIZ_OBJ.0)

# The USE_LINENOISE variable may be undefined, set to 0, or set
# to 1. If it is set to 0, then there is no need to build or link
# the linenoise.o object.
LINENOISE_DEF.0 =
LINENOISE_DEF.1 = -DHAVE_LINENOISE
LINENOISE_DEF.  = $(LINENOISE_DEF.0)
LINENOISE_OBJ.0 =
LINENOISE_OBJ.1 = $(OBJDIR)/linenoise.o
LINENOISE_OBJ.  = $(LINENOISE_OBJ.0)
}]

writeln [string map [list <<<NEXT_LINE>>> \\] {
EXTRAOBJ = <<<NEXT_LINE>>>
 $(SQLITE3_OBJ.$(USE_SYSTEM_SQLITE)) <<<NEXT_LINE>>>
 $(MINIZ_OBJ.$(FOSSIL_ENABLE_MINIZ)) <<<NEXT_LINE>>>
 $(LINENOISE_OBJ.$(USE_LINENOISE)) <<<NEXT_LINE>>>
 $(OBJDIR)/shell.o <<<NEXT_LINE>>>
 $(OBJDIR)/th.o <<<NEXT_LINE>>>
 $(OBJDIR)/th_lang.o <<<NEXT_LINE>>>
 $(OBJDIR)/th_tcl.o <<<NEXT_LINE>>>
 $(OBJDIR)/cson_amalgamation.o
}]

  writeln "\$(OBJDIR)/$s.h:\t\$(OBJDIR)/headers\n"
}

writeln "\$(OBJDIR)/sqlite3.o:\t\$(SRCDIR)/sqlite3.c"
writeln "\t\$(XTCC) \$(SQLITE_OPTIONS) \$(SQLITE_CFLAGS) -c \$(SRCDIR)/sqlite3.c -o \$@\n"

writeln "\$(OBJDIR)/shell.o:\t\$(SRCDIR)/shell.c \$(SRCDIR)/sqlite3.h"
writeln "\t\$(XTCC) \$(SHELL_OPTIONS) \$(SHELL_CFLAGS) -DHAVE_LINENOISE -c \$(SRCDIR)/shell.c -o \$@\n"

writeln "\$(OBJDIR)/linenoise.o:\t\$(SRCDIR)/linenoise.c \$(SRCDIR)/linenoise.h"
writeln "\t\$(XTCC) -c \$(SRCDIR)/linenoise.c -o \$@\n"

writeln "\$(OBJDIR)/th.o:\t\$(SRCDIR)/th.c"
writeln "\t\$(XTCC) -c \$(SRCDIR)/th.c -o \$@\n"







  writeln "\$(OBJDIR)/$s.h:\t\$(OBJDIR)/headers\n"
}

writeln "\$(OBJDIR)/sqlite3.o:\t\$(SRCDIR)/sqlite3.c"
writeln "\t\$(XTCC) \$(SQLITE_OPTIONS) \$(SQLITE_CFLAGS) -c \$(SRCDIR)/sqlite3.c -o \$@\n"

writeln "\$(OBJDIR)/shell.o:\t\$(SRCDIR)/shell.c \$(SRCDIR)/sqlite3.h"
writeln "\t\$(XTCC) \$(SHELL_OPTIONS) \$(SHELL_CFLAGS) \$(LINENOISE_DEF.\$(USE_LINENOISE)) -c \$(SRCDIR)/shell.c -o \$@\n"

writeln "\$(OBJDIR)/linenoise.o:\t\$(SRCDIR)/linenoise.c \$(SRCDIR)/linenoise.h"
writeln "\t\$(XTCC) -c \$(SRCDIR)/linenoise.c -o \$@\n"

writeln "\$(OBJDIR)/th.o:\t\$(SRCDIR)/th.c"
writeln "\t\$(XTCC) -c \$(SRCDIR)/th.c -o \$@\n"

# This file is automatically generated.  Instead of editing this
# file, edit "makemake.tcl" then run "tclsh makemake.tcl"
# to regenerate this file.
#
# This is a makefile for use on Cygwin/Darwin/FreeBSD/Linux/Windows using
# MinGW or MinGW-w64.
#





#### Select one of MinGW, MinGW-w64 (32-bit) or MinGW-w64 (64-bit) compilers.
#    By default, this is an empty string (i.e. use the native compiler).
#
PREFIX =
# PREFIX = mingw32-
# PREFIX = i686-pc-mingw32-






# This file is automatically generated.  Instead of editing this
# file, edit "makemake.tcl" then run "tclsh makemake.tcl"
# to regenerate this file.
#
# This is a makefile for use on Cygwin/Darwin/FreeBSD/Linux/Windows using
# MinGW or MinGW-w64.
#
# Some of the special options which can be passed to make
#   USE_WINDOWS=1    if building under a windows command prompt
#   X64=1            if using an unprefixed 64-bit mingw compiler
#

#### Select one of MinGW, MinGW-w64 (32-bit) or MinGW-w64 (64-bit) compilers.
#    By default, this is an empty string (i.e. use the native compiler).
#
PREFIX =
# PREFIX = mingw32-
# PREFIX = i686-pc-mingw32-
#
# FOSSIL_ENABLE_SSL = 1

#### Automatically build OpenSSL when building Fossil (causes rebuild
#    issues when building incrementally).
#
# FOSSIL_BUILD_SSL = 1





#### Enable legacy treatment of mv/rm (skip checkout files)
#
# FOSSIL_ENABLE_LEGACY_MV_RM = 1

#### Enable TH1 scripts in embedded documentation files
#






#
# FOSSIL_ENABLE_SSL = 1

#### Automatically build OpenSSL when building Fossil (causes rebuild
#    issues when building incrementally).
#
# FOSSIL_BUILD_SSL = 1

#### Enable relative paths in external diff/gdiff
#
# FOSSIL_ENABLE_EXEC_REL_PATHS = 1

#### Enable legacy treatment of mv/rm (skip checkout files)
#
# FOSSIL_ENABLE_LEGACY_MV_RM = 1

#### Enable TH1 scripts in embedded documentation files
#
endif

#### The directories where the OpenSSL include and library files are located.
#    The recommended usage here is to use the Sysinternals junction tool
#    to create a hard link between an "openssl-1.x" sub-directory of the
#    Fossil source code directory and the target OpenSSL source directory.
#
OPENSSLDIR = $(SRCDIR)/../compat/openssl-1.0.2d
OPENSSLINCDIR = $(OPENSSLDIR)/include
OPENSSLLIBDIR = $(OPENSSLDIR)

#### Either the directory where the Tcl library is installed or the Tcl
#    source code directory resides (depending on the value of the macro
#    FOSSIL_TCL_SOURCE).  If this points to the Tcl install directory,
#    this directory must have "include" and "lib" sub-directories.  If






endif

#### The directories where the OpenSSL include and library files are located.
#    The recommended usage here is to use the Sysinternals junction tool
#    to create a hard link between an "openssl-1.x" sub-directory of the
#    Fossil source code directory and the target OpenSSL source directory.
#
OPENSSLDIR = $(SRCDIR)/../compat/openssl-1.0.2g
OPENSSLINCDIR = $(OPENSSLDIR)/include
OPENSSLLIBDIR = $(OPENSSLDIR)

#### Either the directory where the Tcl library is installed or the Tcl
#    source code directory resides (depending on the value of the macro
#    FOSSIL_TCL_SOURCE).  If this points to the Tcl install directory,
#    this directory must have "include" and "lib" sub-directories.  If
#### C Compile and options for use in building executables that
#    will run on the target platform.  This is usually the same
#    as BCC, unless you are cross-compiling.  This C compiler builds
#    the finished binary for fossil.  The BCC compiler above is used
#    for building intermediate code-generator tools.
#
TCC = $(PREFIX)gcc -Os -Wall

#### When not using the miniz compression library, zlib is required.
#
ifndef FOSSIL_ENABLE_MINIZ
TCC += -L$(ZLIBDIR) -I$(ZINCDIR)
endif

#### Add the necessary command line options to build with debugging
#    symbols, if enabled.
#
ifdef FOSSIL_ENABLE_SYMBOLS
TCC += -g








endif

#### Compile resources for use in building executables that will run
#    on the target platform.
#
RCC = $(PREFIX)windres -I$(SRCDIR)







#### C Compile and options for use in building executables that
#    will run on the target platform.  This is usually the same
#    as BCC, unless you are cross-compiling.  This C compiler builds
#    the finished binary for fossil.  The BCC compiler above is used
#    for building intermediate code-generator tools.
#
TCC = $(PREFIX)gcc -Wall







#### Add the necessary command line options to build with debugging
#    symbols, if enabled.
#
ifdef FOSSIL_ENABLE_SYMBOLS
TCC += -g
else
TCC += -Os
endif

#### When not using the miniz compression library, zlib is required.
#
ifndef FOSSIL_ENABLE_MINIZ
TCC += -L$(ZLIBDIR) -I$(ZINCDIR)
endif

#### Compile resources for use in building executables that will run
#    on the target platform.
#
RCC = $(PREFIX)windres -I$(SRCDIR)

endif

# With HTTPS support
ifdef FOSSIL_ENABLE_SSL
TCC += -DFOSSIL_ENABLE_SSL=1
RCC += -DFOSSIL_ENABLE_SSL=1
endif







# With legacy treatment of mv/rm
ifdef FOSSIL_ENABLE_LEGACY_MV_RM
TCC += -DFOSSIL_ENABLE_LEGACY_MV_RM=1
RCC += -DFOSSIL_ENABLE_LEGACY_MV_RM=1
endif







endif

# With HTTPS support
ifdef FOSSIL_ENABLE_SSL
TCC += -DFOSSIL_ENABLE_SSL=1
RCC += -DFOSSIL_ENABLE_SSL=1
endif

# With relative paths in external diff/gdiff
ifdef FOSSIL_ENABLE_EXEC_REL_PATHS
TCC += -DFOSSIL_ENABLE_EXEC_REL_PATHS=1
RCC += -DFOSSIL_ENABLE_EXEC_REL_PATHS=1
endif

# With legacy treatment of mv/rm
ifdef FOSSIL_ENABLE_LEGACY_MV_RM
TCC += -DFOSSIL_ENABLE_LEGACY_MV_RM=1
RCC += -DFOSSIL_ENABLE_LEGACY_MV_RM=1
endif

ifdef FOSSIL_BUILD_SSL
APPTARGETS += openssl
endif

$(APPNAME):	$(APPTARGETS) $(OBJDIR)/headers $(CODECHECK1) $(OBJ) $(EXTRAOBJ) $(OBJDIR)/fossil.o
	$(CODECHECK1) $(TRANS_SRC)
	$(TCC) -o $@ $(OBJ) $(EXTRAOBJ) $(LIB) $(OBJDIR)/fossil.o

# This rule prevents make from using its default rules to try to build
# an executable named "manifest" out of the file named "manifest.c"
#
$(SRCDIR)/../manifest:
	# noop







ifdef FOSSIL_BUILD_SSL
APPTARGETS += openssl
endif

$(APPNAME):	$(APPTARGETS) $(OBJDIR)/headers $(CODECHECK1) $(OBJ) $(EXTRAOBJ) $(OBJDIR)/fossil.o
	$(CODECHECK1) $(TRANS_SRC)
	$(TCC) -o $@ $(OBJ) $(EXTRAOBJ) $(OBJDIR)/fossil.o $(LIB)

# This rule prevents make from using its default rules to try to build
# an executable named "manifest" out of the file named "manifest.c"
#
$(SRCDIR)/../manifest:
	# noop

FOSSIL_BUILD_ZLIB = 1
!endif

# Link everything except SQLite dynamically?
!ifndef FOSSIL_DYNAMIC_BUILD
FOSSIL_DYNAMIC_BUILD = 0
!endif






# Enable the JSON API?
!ifndef FOSSIL_ENABLE_JSON
FOSSIL_ENABLE_JSON = 0
!endif

# Enable legacy treatment of the mv/rm commands?






FOSSIL_BUILD_ZLIB = 1
!endif

# Link everything except SQLite dynamically?
!ifndef FOSSIL_DYNAMIC_BUILD
FOSSIL_DYNAMIC_BUILD = 0
!endif

# Enable relative paths in external diff/gdiff?
!ifndef FOSSIL_ENABLE_EXEC_REL_PATHS
FOSSIL_ENABLE_EXEC_REL_PATHS = 0
!endif

# Enable the JSON API?
!ifndef FOSSIL_ENABLE_JSON
FOSSIL_ENABLE_JSON = 0
!endif

# Enable legacy treatment of the mv/rm commands?
# Enable support for Windows XP with Visual Studio 201x?
!ifndef FOSSIL_ENABLE_WINXP
FOSSIL_ENABLE_WINXP = 0
!endif

!if $(FOSSIL_ENABLE_SSL)!=0
SSLDIR    = $(B)\compat\openssl-1.0.2d
SSLINCDIR = $(SSLDIR)\inc32
!if $(FOSSIL_DYNAMIC_BUILD)!=0
SSLLIBDIR = $(SSLDIR)\out32dll
!else
SSLLIBDIR = $(SSLDIR)\out32
!endif
SSLLFLAGS = /nologo /opt:ref /debug






# Enable support for Windows XP with Visual Studio 201x?
!ifndef FOSSIL_ENABLE_WINXP
FOSSIL_ENABLE_WINXP = 0
!endif

!if $(FOSSIL_ENABLE_SSL)!=0
SSLDIR    = $(B)\compat\openssl-1.0.2g
SSLINCDIR = $(SSLDIR)\inc32
!if $(FOSSIL_DYNAMIC_BUILD)!=0
SSLLIBDIR = $(SSLDIR)\out32dll
!else
SSLLIBDIR = $(SSLDIR)\out32
!endif
SSLLFLAGS = /nologo /opt:ref /debug
!if $(FOSSIL_ENABLE_SSL)!=0
TCC       = $(TCC) /DFOSSIL_ENABLE_SSL=1
RCC       = $(RCC) /DFOSSIL_ENABLE_SSL=1
LIBS      = $(LIBS) $(SSLLIB)
LIBDIR    = $(LIBDIR) /LIBPATH:$(SSLLIBDIR)
!endif






!if $(FOSSIL_ENABLE_LEGACY_MV_RM)!=0
TCC       = $(TCC) /DFOSSIL_ENABLE_LEGACY_MV_RM=1
RCC       = $(RCC) /DFOSSIL_ENABLE_LEGACY_MV_RM=1
!endif

!if $(FOSSIL_ENABLE_TH1_DOCS)!=0






!if $(FOSSIL_ENABLE_SSL)!=0
TCC       = $(TCC) /DFOSSIL_ENABLE_SSL=1
RCC       = $(RCC) /DFOSSIL_ENABLE_SSL=1
LIBS      = $(LIBS) $(SSLLIB)
LIBDIR    = $(LIBDIR) /LIBPATH:$(SSLLIBDIR)
!endif

!if $(FOSSIL_ENABLE_EXEC_REL_PATHS)!=0
TCC       = $(TCC) /DFOSSIL_ENABLE_EXEC_REL_PATHS=1
RCC       = $(RCC) /DFOSSIL_ENABLE_EXEC_REL_PATHS=1
!endif

!if $(FOSSIL_ENABLE_LEGACY_MV_RM)!=0
TCC       = $(TCC) /DFOSSIL_ENABLE_LEGACY_MV_RM=1
RCC       = $(RCC) /DFOSSIL_ENABLE_LEGACY_MV_RM=1
!endif

!if $(FOSSIL_ENABLE_TH1_DOCS)!=0
Changes to src/manifest.c.
  /* Every control artifact ends with a '\n' character.  Exit early
  ** if that is not the case for this artifact.
  */
  if( !isRepeat ) g.parseCnt[0]++;
  z = blob_materialize(pContent);
  n = blob_size(pContent);
  if( pErr && (n<=0 || z[n-1]!='\n') ){
    blob_reset(pContent);
    blob_append(pErr, n ? "not terminated with \\n" : "zero-length", -1);
    return 0;
  }

  /* Strip off the PGP signature if there is one.
  */
  remove_pgp_signature(&z, &n);







  /* Every control artifact ends with a '\n' character.  Exit early
  ** if that is not the case for this artifact.
  */
  if( !isRepeat ) g.parseCnt[0]++;
  z = blob_materialize(pContent);
  n = blob_size(pContent);
  if( n<=0 || z[n-1]!='\n' ){
    blob_reset(pContent);
    blob_appendf(pErr, "%s", n ? "not terminated with \\n" : "zero-length");
    return 0;
  }

  /* Strip off the PGP signature if there is one.
  */
  remove_pgp_signature(&z, &n);

  }
  return mperm;
}

/*
** Add a single entry to the mlink table.  Also add the filename to
** the filename table if it is not there already.





*/
static void add_one_mlink(
  int pmid,                 /* The parent manifest */
  const char *zFromUuid,    /* UUID for content in parent */
  int mid,                  /* The record ID of the manifest */
  const char *zToUuid,      /* UUID for content in child */
  const char *zFilename,    /* Filename */
  const char *zPrior,       /* Previous filename. NULL if unchanged */
  int isPublic,             /* True if mid is not a private manifest */
  int isPrimary,            /* pmid is the primary parent of mid */
  int mperm                 /* 1: exec, 2: symlink */
){
  int fnid, pfnid, pid, fid;

  static Stmt s1;

  fnid = filename_to_fnid(zFilename);
  if( zPrior==0 ){
    pfnid = 0;
  }else{
    pfnid = filename_to_fnid(zPrior);
  }
  if( zFromUuid==0 || zFromUuid[0]==0 ){
    pid = 0;
  }else{
    pid = uuid_to_rid(zFromUuid, 1);
  }
  if( zToUuid==0 || zToUuid[0]==0 ){
    fid = 0;
  }else{
    fid = uuid_to_rid(zToUuid, 1);
    if( isPublic ) content_make_public(fid);
  }












  db_static_prepare(&s1,
    "INSERT INTO mlink(mid,fid,pmid,pid,fnid,pfnid,mperm,isaux)"
    "VALUES(:m,:f,:pm,:p,:n,:pfn,:mp,:isaux)"
  );
  db_bind_int(&s1, ":m", mid);
  db_bind_int(&s1, ":f", fid);
  db_bind_int(&s1, ":pm", pmid);
  db_bind_int(&s1, ":p", pid);
  db_bind_int(&s1, ":n", fnid);
  db_bind_int(&s1, ":pfn", pfnid);
  db_bind_int(&s1, ":mp", mperm);
  db_bind_int(&s1, ":isaux", isPrimary==0);
  db_exec(&s1);

  if( pid && fid ){
    content_deltify(pid, fid, 0);
  }
}

/*
** Do a binary search to find a file in the p->aFile[] array.






  }
  return mperm;
}

/*
** Add a single entry to the mlink table.  Also add the filename to
** the filename table if it is not there already.
**
** An mlink entry is always created if isPrimary is true.  But if
** isPrimary is false (meaning that pmid is a merge parent of mid)
** then the mlink entry is only created if there is already an mlink
** from primary parent for the same file.
*/
static void add_one_mlink(
  int pmid,                 /* The parent manifest */
  const char *zFromUuid,    /* UUID for content in parent */
  int mid,                  /* The record ID of the manifest */
  const char *zToUuid,      /* UUID for content in child */
  const char *zFilename,    /* Filename */
  const char *zPrior,       /* Previous filename. NULL if unchanged */
  int isPublic,             /* True if mid is not a private manifest */
  int isPrimary,            /* pmid is the primary parent of mid */
  int mperm                 /* 1: exec, 2: symlink */
){
  int fnid, pfnid, pid, fid;
  int doInsert;
  static Stmt s1, s2;

  fnid = filename_to_fnid(zFilename);
  if( zPrior==0 ){
    pfnid = 0;
  }else{
    pfnid = filename_to_fnid(zPrior);
  }
  if( zFromUuid==0 || zFromUuid[0]==0 ){
    pid = 0;
  }else{
    pid = uuid_to_rid(zFromUuid, 1);
  }
  if( zToUuid==0 || zToUuid[0]==0 ){
    fid = 0;
  }else{
    fid = uuid_to_rid(zToUuid, 1);
    if( isPublic ) content_make_public(fid);
  }
  if( isPrimary ){
    doInsert = 1;
  }else{
    db_static_prepare(&s2,
      "SELECT 1 FROM mlink WHERE mid=:m AND fnid=:n AND NOT isaux"
    );
    db_bind_int(&s2, ":m", mid);
    db_bind_int(&s2, ":n", fnid);
    doInsert = db_step(&s2)==SQLITE_ROW;
    db_reset(&s2);
  }
  if( doInsert ){
    db_static_prepare(&s1,
      "INSERT INTO mlink(mid,fid,pmid,pid,fnid,pfnid,mperm,isaux)"
      "VALUES(:m,:f,:pm,:p,:n,:pfn,:mp,:isaux)"
    );
    db_bind_int(&s1, ":m", mid);
    db_bind_int(&s1, ":f", fid);
    db_bind_int(&s1, ":pm", pmid);
    db_bind_int(&s1, ":p", pid);
    db_bind_int(&s1, ":n", fnid);
    db_bind_int(&s1, ":pfn", pfnid);
    db_bind_int(&s1, ":mp", mperm);
    db_bind_int(&s1, ":isaux", isPrimary==0);
    db_exec(&s1);
  }
  if( pid && fid ){
    content_deltify(pid, fid, 0);
  }
}

/*
** Do a binary search to find a file in the p->aFile[] array.
** A single mlink entry is added for every file that changed content,
** name, and/or permissions going from pid to cid.
**
** Deleted files have mlink.fid=0.
** Added files have mlink.pid=0.
** Files added by merge have mlink.pid=-1
** Edited files have both mlink.pid!=0 and mlink.fid!=0









*/
static void add_mlink(
  int pmid, Manifest *pParent,    /* Parent check-in */
  int mid,  Manifest *pChild,     /* The child check-in */
  int isPrim                      /* TRUE if pmid is the primary parent of mid */
){
  Blob otherContent;
  int otherRid;
  int i, rc;
  ManifestFile *pChildFile, *pParentFile;
  Manifest **ppOther;
  static Stmt eq;






** A single mlink entry is added for every file that changed content,
** name, and/or permissions going from pid to cid.
**
** Deleted files have mlink.fid=0.
** Added files have mlink.pid=0.
** File added by merge have mlink.pid=-1
** Edited files have both mlink.pid!=0 and mlink.fid!=0
**
** Many mlink entries for merge parents will only be added if another mlink
** entry already exists for the same file from the primary parent.  Therefore,
** to ensure that all merge-parent mlink entries are properly created:
**
**    (1) Make this routine a no-op if pParent is a merge parent and the
**        primary parent is a phantom.
**    (2) Invoke this routine recursively for merge-parents if pParent is the
**        primary parent.
*/
static void add_mlink(
  int pmid, Manifest *pParent,  /* Parent check-in */
  int mid,  Manifest *pChild,   /* The child check-in */
  int isPrim                    /* TRUE if pmid is the primary parent of mid */
){
  Blob otherContent;
  int otherRid;
  int i, rc;
  ManifestFile *pChildFile, *pParentFile;
  Manifest **ppOther;
  static Stmt eq;
    if( *ppOther==0 ) return;
  }
  if( fetch_baseline(pParent, 0) || fetch_baseline(pChild, 0) ){
    manifest_destroy(*ppOther);
    return;
  }
  isPublic = !content_is_private(mid);














  /* Try to make the parent manifest a delta from the child, if that
  ** is an appropriate thing to do.  For a new baseline, make the
  ** previous baseline a delta from the current baseline.
  */
  if( (pParent->zBaseline==0)==(pChild->zBaseline==0) ){
    content_deltify(pmid, mid, 0);






    if( *ppOther==0 ) return;
  }
  if( fetch_baseline(pParent, 0) || fetch_baseline(pChild, 0) ){
    manifest_destroy(*ppOther);
    return;
  }
  isPublic = !content_is_private(mid);
  
  /* If pParent is not the primary parent of pChild, and the primary
  ** parent of pChild is a phantom, then abort this routine without
  ** doing any work.  The mlink entries will be computed when the
  ** primary parent dephantomizes.
  */
  if( !isPrim && otherRid==mid
   && !db_exists("SELECT 1 FROM blob WHERE uuid=%Q AND size>0",
                 pChild->azParent[0])
  ){
    manifest_cache_insert(*ppOther);
    return;
  }

  /* Try to make the parent manifest a delta from the child, if that
  ** is an appropriate thing to do.  For a new baseline, make the
  ** previous baseline a delta from the current baseline.
  */
  if( (pParent->zBaseline==0)==(pChild->zBaseline==0) ){
    content_deltify(pmid, mid, 0);
      if( pChildFile==0 && pParentFile->zUuid!=0 ){
        add_one_mlink(pmid, pParentFile->zUuid, mid, 0, pParentFile->zName, 0,
                      isPublic, isPrim, 0);
      }
    }
  }
  manifest_cache_insert(*ppOther);











}

/*
** Setup to do multiple manifest_crosslink() calls.
** This is only required if processing ticket changes.
*/
void manifest_crosslink_begin(void){






      if( pChildFile==0 && pParentFile->zUuid!=0 ){
        add_one_mlink(pmid, pParentFile->zUuid, mid, 0, pParentFile->zName, 0,
                      isPublic, isPrim, 0);
      }
    }
  }
  manifest_cache_insert(*ppOther);
  
  /* If pParent is the primary parent of pChild, also run this analysis
  ** for all merge parents of pChild
  */
  if( isPrim ){
    for(i=1; i<pChild->nParent; i++){
      pmid = uuid_to_rid(pChild->azParent[i], 0);
      if( pmid<=0 ) continue;
      add_mlink(pmid, 0, mid, pChild, 0);
    }
  }
}

/*
** Setup to do multiple manifest_crosslink() calls.
** This is only required if processing ticket changes.
*/
void manifest_crosslink_begin(void){
      }
      for(i=0; i<p->nParent; i++){
        int pid = uuid_to_rid(p->azParent[i], 1);
        db_multi_exec(
           "INSERT OR IGNORE INTO plink(pid, cid, isprim, mtime, baseid)"
           "VALUES(%d, %d, %d, %.17g, %s)",
           pid, rid, i==0, p->rDate, zBaseId/*safe-for-%s*/);
        add_mlink(pid, 0, rid, p, i==0);
        if( i==0 ) parentid = pid;
      }

      if( p->nParent>1 ){
        /* Change MLINK.PID from 0 to -1 for files that are added by merge. */
        db_multi_exec(
           "UPDATE mlink SET pid=-1"
           " WHERE mid=%d"
           "   AND pid=0"
           "   AND fnid IN "






      }
      for(i=0; i<p->nParent; i++){
        int pid = uuid_to_rid(p->azParent[i], 1);
        db_multi_exec(
           "INSERT OR IGNORE INTO plink(pid, cid, isprim, mtime, baseid)"
           "VALUES(%d, %d, %d, %.17g, %s)",
           pid, rid, i==0, p->rDate, zBaseId/*safe-for-%s*/);

        if( i==0 ) parentid = pid;
      }
      add_mlink(parentid, 0, rid, p, 1);
      if( p->nParent>1 ){
        /* Change MLINK.PID from 0 to -1 for files that are added by merge. */
        db_multi_exec(
           "UPDATE mlink SET pid=-1"
           " WHERE mid=%d"
           "   AND pid=0"
           "   AND fnid IN "
  }
  if( p->type==CFTYPE_EVENT ){
    char *zTag = mprintf("event-%s", p->zEventId);
    int tagid = tag_findid(zTag, 1);
    int prior, subsequent;
    int nWiki;
    char zLength[40];

    while( fossil_isspace(p->zWiki[0]) ) p->zWiki++;
    nWiki = strlen(p->zWiki);
    sqlite3_snprintf(sizeof(zLength), zLength, "%d", nWiki);
    tag_insert(zTag, 1, zLength, rid, p->rDate, rid);
    fossil_free(zTag);
    prior = db_int(0,
      "SELECT rid FROM tagxref"






  }
  if( p->type==CFTYPE_EVENT ){
    char *zTag = mprintf("event-%s", p->zEventId);
    int tagid = tag_findid(zTag, 1);
    int prior, subsequent;
    int nWiki;
    char zLength[40];
    Stmt qatt;
    while( fossil_isspace(p->zWiki[0]) ) p->zWiki++;
    nWiki = strlen(p->zWiki);
    sqlite3_snprintf(sizeof(zLength), zLength, "%d", nWiki);
    tag_insert(zTag, 1, zLength, rid, p->rDate, rid);
    fossil_free(zTag);
    prior = db_int(0,
      "SELECT rid FROM tagxref"
        "REPLACE INTO event(type,mtime,objid,tagid,user,comment,bgcolor)"
        "VALUES('e',%.17g,%d,%d,%Q,%Q,"
        "  (SELECT value FROM tagxref WHERE tagid=%d AND rid=%d));",
        p->rEventDate, rid, tagid, p->zUser, p->zComment,
        TAG_BGCOLOR, rid
      );
    }





















  }







  if( p->type==CFTYPE_TICKET ){
    char *zTag;

    assert( manifest_crosslink_busy==1 );
    zTag = mprintf("tkt-%s", p->zTicketUuid);
    tag_insert(zTag, 1, 0, rid, p->rDate, rid);
    fossil_free(zTag);
    db_multi_exec("INSERT OR IGNORE INTO pending_tkt VALUES(%Q)",
                  p->zTicketUuid);




















  }







  if( p->type==CFTYPE_ATTACHMENT ){
    char *zComment = 0;
    const char isAdd = (p->zAttachSrc && p->zAttachSrc[0]) ? 1 : 0;





    const char attachToType = fossil_is_uuid(p->zAttachTarget)

      ? 't' /* attach to ticket */



      : 'w' /* attach to wiki page */;




    db_multi_exec(
       "INSERT INTO attachment(attachid, mtime, src, target,"
                              "filename, comment, user)"
       "VALUES(%d,%.17g,%Q,%Q,%Q,%Q,%Q);",
       rid, p->rDate, p->zAttachSrc, p->zAttachTarget, p->zAttachName,
       (p->zComment ? p->zComment : ""), p->zUser
    );






        "REPLACE INTO event(type,mtime,objid,tagid,user,comment,bgcolor)"
        "VALUES('e',%.17g,%d,%d,%Q,%Q,"
        "  (SELECT value FROM tagxref WHERE tagid=%d AND rid=%d));",
        p->rEventDate, rid, tagid, p->zUser, p->zComment,
        TAG_BGCOLOR, rid
      );
    }
    /* Locate and update comment for any attachments */
    db_prepare(&qatt,
       "SELECT attachid, src, target, filename FROM attachment"
       " WHERE target=%Q",
       p->zEventId
    );
    while( db_step(&qatt)==SQLITE_ROW ){
      const char *zAttachId = db_column_text(&qatt, 0);
      const char *zSrc = db_column_text(&qatt, 1);
      const char *zTarget = db_column_text(&qatt, 2);
      const char *zName = db_column_text(&qatt, 3);
      const char isAdd = (zSrc && zSrc[0]) ? 1 : 0;
      char *zComment;
      if( isAdd ){
        zComment = mprintf(
             "Add attachment [/artifact/%!S|%h] to"
             " tech note [/technote/%h|%.10h]",
             zSrc, zName, zTarget, zTarget); 
      }else{
        zComment = mprintf("Delete attachment \"%h\" from tech note [%.10h]",
             zName, zTarget);
      }
      db_multi_exec("UPDATE event SET comment=%Q, type='e'"
                       " WHERE objid=%Q",
                    zComment, zAttachId);
      fossil_free(zComment);      
    }
    db_finalize(&qatt);
  }
  if( p->type==CFTYPE_TICKET ){
    char *zTag;
    Stmt qatt;
    assert( manifest_crosslink_busy==1 );
    zTag = mprintf("tkt-%s", p->zTicketUuid);
    tag_insert(zTag, 1, 0, rid, p->rDate, rid);
    fossil_free(zTag);
    db_multi_exec("INSERT OR IGNORE INTO pending_tkt VALUES(%Q)",
                  p->zTicketUuid);
    /* Locate and update comment for any attachments */
    db_prepare(&qatt,
       "SELECT attachid, src, target, filename FROM attachment"
       " WHERE target=%Q",
       p->zTicketUuid
    );
    while( db_step(&qatt)==SQLITE_ROW ){
      const char *zAttachId = db_column_text(&qatt, 0);
      const char *zSrc = db_column_text(&qatt, 1);
      const char *zTarget = db_column_text(&qatt, 2);
      const char *zName = db_column_text(&qatt, 3);
      const char isAdd = (zSrc && zSrc[0]) ? 1 : 0;
      char *zComment;
      if( isAdd ){
        zComment = mprintf(
             "Add attachment [/artifact/%!S|%h] to ticket [%!S|%S]",
             zSrc, zName, zTarget, zTarget);
      }else{
        zComment = mprintf("Delete attachment \"%h\" from ticket [%!S|%S]",
             zName, zTarget, zTarget);
      }
      db_multi_exec("UPDATE event SET comment=%Q, type='t'"
                       " WHERE objid=%Q",
                    zComment, zAttachId);
      fossil_free(zComment);      
    }
    db_finalize(&qatt);
  }
  if( p->type==CFTYPE_ATTACHMENT ){
    char *zComment = 0;
    const char isAdd = (p->zAttachSrc && p->zAttachSrc[0]) ? 1 : 0;
    /* We assume that we're attaching to a wiki page until we
    ** prove otherwise (which could on a later artifact if we
    ** process the attachment artifact before the artifact to
    ** which it is attached!) */
    char attachToType = 'w';       
    if( fossil_is_uuid(p->zAttachTarget) ){
      if( db_exists("SELECT 1 FROM tag WHERE tagname='tkt-%q'",
            p->zAttachTarget)
        ){
        attachToType = 't';          /* Attaching to known ticket */
      }else if( db_exists("SELECT 1 FROM tag WHERE tagname='event-%q'",
                  p->zAttachTarget) 
            ){
        attachToType = 'e';          /* Attaching to known tech note */
      }
    }
    db_multi_exec(
       "INSERT INTO attachment(attachid, mtime, src, target,"
                              "filename, comment, user)"
       "VALUES(%d,%.17g,%Q,%Q,%Q,%Q,%Q);",
       rid, p->rDate, p->zAttachSrc, p->zAttachTarget, p->zAttachName,
       (p->zComment ? p->zComment : ""), p->zUser
    );
        zComment = mprintf(
             "Add attachment [/artifact/%!S|%h] to wiki page [%h]",
             p->zAttachSrc, p->zAttachName, p->zAttachTarget);
      }else{
        zComment = mprintf("Delete attachment \"%h\" from wiki page [%h]",
             p->zAttachName, p->zAttachTarget);
      }









    }else{
      if( isAdd ){
        zComment = mprintf(
             "Add attachment [/artifact/%!S|%h] to ticket [%!S|%S]",
             p->zAttachSrc, p->zAttachName, p->zAttachTarget, p->zAttachTarget);
      }else{
        zComment = mprintf("Delete attachment \"%h\" from ticket [%!S|%S]",






        zComment = mprintf(
             "Add attachment [/artifact/%!S|%h] to wiki page [%h]",
             p->zAttachSrc, p->zAttachName, p->zAttachTarget);
      }else{
        zComment = mprintf("Delete attachment \"%h\" from wiki page [%h]",
             p->zAttachName, p->zAttachTarget);
      }
    }else if( 'e' == attachToType ){
      if( isAdd ){
        zComment = mprintf(
          "Add attachment [/artifact/%!S|%h] to tech note [/technote/%h|%.10h]",
          p->zAttachSrc, p->zAttachName, p->zAttachTarget, p->zAttachTarget); 
      }else{
        zComment = mprintf("Delete attachment \"%h\" from tech note [%.10h]",
             p->zAttachName, p->zAttachTarget);
      }      
    }else{
      if( isAdd ){
        zComment = mprintf(
             "Add attachment [/artifact/%!S|%h] to ticket [%!S|%S]",
             p->zAttachSrc, p->zAttachName, p->zAttachTarget, p->zAttachTarget);
      }else{
        zComment = mprintf("Delete attachment \"%h\" from ticket [%!S|%S]",
      }else if( strcmp(zName, "+user")==0 ){
        blob_appendf(&comment, " Change user to \"%h\".", zValue);
        continue;
      }else if( strcmp(zName, "+date")==0 ){
        blob_appendf(&comment, " Timestamp %h.", zValue);
        continue;
      }else if( memcmp(zName, "-sym-",5)==0 ){

        if( !branchMove ) blob_appendf(&comment, " Cancel tag \"%h\"", &zName[5]);

      }else if( memcmp(zName, "*sym-",5)==0 ){
        if( !branchMove ){
          blob_appendf(&comment, " Add propagating tag \"%h\"", &zName[5]);
        }
      }else if( memcmp(zName, "+sym-",5)==0 ){
        blob_appendf(&comment, " Add tag \"%h\"", &zName[5]);
      }else if( strcmp(zName, "+closed")==0 ){






      }else if( strcmp(zName, "+user")==0 ){
        blob_appendf(&comment, " Change user to \"%h\".", zValue);
        continue;
      }else if( strcmp(zName, "+date")==0 ){
        blob_appendf(&comment, " Timestamp %h.", zValue);
        continue;
      }else if( memcmp(zName, "-sym-",5)==0 ){
        if( !branchMove ){
          blob_appendf(&comment, " Cancel tag \"%h\"", &zName[5]);
        }
      }else if( memcmp(zName, "*sym-",5)==0 ){
        if( !branchMove ){
          blob_appendf(&comment, " Add propagating tag \"%h\"", &zName[5]);
        }
      }else if( memcmp(zName, "+sym-",5)==0 ){
        blob_appendf(&comment, " Add tag \"%h\"", &zName[5]);
      }else if( strcmp(zName, "+closed")==0 ){
Changes to src/markdown.md.
# Markdown formatting rules

In addition to its native Wiki formatting syntax, Fossil supports Markdown syntax as specified by 
[John Gruber's original Markdown implementation](http://daringfireball.net/projects/markdown/). 
For lots of examples - not repeated here - please refer to its 
[syntax description](http://daringfireball.net/projects/markdown/syntax), of which the page you
are reading is an extract.

This page itself uses Markdown formatting.

## Summary

  - Block elements

      * A **paragraph** is a group of consecutive lines. Paragraphs are separated by blank lines.

      * A **Header** is a line of text underlined with equal signs or hyphens, or prefixed by a 
        number of hash marks.

      * **Block quotes** are blocks of text prefixed by '>'.

      * **Ordered list** items are prefixed by a number and a period. **Unordered list** items
        are prefixed by a hyphen, asterisk or plus sign. Prefix and item text are separated by
        whitespace. 

      * **Code blocks** are formed by lines of text (possibly including empty lines) prefixed by
        at least 4 spaces or a tab.

      * A **horizontal rule** is a line consisting of 3 or more asterisks, hyphens or underscores,
        with optional whitespace between them.

  - Span elements

      * 3 types of **links** exist:

        - **automatic links** are URLs or email addresses enclosed in angle brackets
          ('<' and '>'), and are displayed as such.

        - **inline links** consist of the displayed link text in square brackets ('[' and ']'), 
          followed by the link target in parentheses. 

        - **reference links** separate _link instance_ from _link definition_. A link instance 
          consists of the displayed link text in square brackets, followed by a link definition name 
          in square brackets. 
          The corresponding link definition can occur anywhere on the page, and consists
          of the link definition name in square brackets followed by a colon, whitespace and the 
          link target.

      * **Emphasis** can be given by wrapping text in one or two asterisks or underscores - use
        one for HTML `<em>`, and two for `<strong>` emphasis.

      * A **code span** is text wrapped in backticks ('`').

      * **Images** use a syntax much like inline or reference links, but with alt attribute text 
        ('img alt=...') instead of link text, and the first pair of square
        brackets in an image instance prefixed by an exclamation mark.

  - **Inline HTML** is mostly interpreted automatically.

  - **Escaping** Markdown punctuation characters is done by prefixing them by a backslash ('\\').


# Markdown formatting rules

In addition to its native Wiki formatting syntax, Fossil supports Markdown syntax as specified by
[John Gruber's original Markdown implementation](http://daringfireball.net/projects/markdown/).
For lots of examples - not repeated here - please refer to its
[syntax description](http://daringfireball.net/projects/markdown/syntax), of which the page you
are reading is an extract.

This page itself uses Markdown formatting.

## Summary

  - Block elements

      * A **paragraph** is a group of consecutive lines. Paragraphs are separated by blank lines.

      * A **Header** is a line of text underlined with equal signs or hyphens, or prefixed by a
        number of hash marks.

      * **Block quotes** are blocks of text prefixed by '>'.

      * **Ordered list** items are prefixed by a number and a period. **Unordered list** items
        are prefixed by a hyphen, asterisk or plus sign. Prefix and item text are separated by
        whitespace.

      * **Code blocks** are formed by lines of text (possibly including empty lines) prefixed by
        at least 4 spaces or a tab.

      * A **horizontal rule** is a line consisting of 3 or more asterisks, hyphens or underscores,
        with optional whitespace between them.

  - Span elements

      * 3 types of **links** exist:

        - **automatic links** are URLs or email addresses enclosed in angle brackets
          ('<' and '>'), and are displayed as such.

        - **inline links** consist of the displayed link text in square brackets ('[' and ']'),
          followed by the link target in parentheses.

        - **reference links** separate _link instance_ from _link definition_. A link instance
          consists of the displayed link text in square brackets, followed by a link definition name
          in square brackets.
          The corresponding link definition can occur anywhere on the page, and consists
          of the link definition name in square brackets followed by a colon, whitespace and the
          link target.

      * **Emphasis** can be given by wrapping text in one or two asterisks or underscores - use
        one for HTML `<em>`, and two for `<strong>` emphasis.

      * A **code span** is text wrapped in backticks ('`').

      * **Images** use a syntax much like inline or reference links, but with alt attribute text
        ('img alt=...') instead of link text, and the first pair of square
        brackets in an image instance prefixed by an exclamation mark.

  - **Inline HTML** is mostly interpreted automatically.

  - **Escaping** Markdown punctuation characters is done by prefixing them by a backslash ('\\').
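
As a quick editorial illustration of the constructs summarized above (an example added for this writeup, not part of the markdown.md source):

    # A level-one header

    A paragraph with *emphasis*, **strong emphasis**, a `code span`,
    an automatic link <http://example.com/>, and an
    [inline link](http://daringfireball.net/projects/markdown/ "optional title").

      - an unordered list item
      - another item

    A [reference link][md-syntax] used elsewhere in the text.

    [md-syntax]: http://daringfireball.net/projects/markdown/syntax

    ---

The link definition name (`md-syntax` here) is arbitrary; it only binds the link instance to its definition, and the definition line does not appear in the rendered output.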

level.

### Block quotes

Not every line in a paragraph needs to be prefixed by '>' in order to make it a block quote,
only the first line.

Block quoted paragraphs can be nested by using multiple '>' characters as prefix. 

Within a block quote, Markdown formatting (e.g. lists, emphasis) still works as normal.

### Lists

A list item prefix need not occur first on its line; up to 3 leading spaces are allowed
(4 spaces would make a code block out of the following text).

For unordered lists, asterisks, hyphens and plus signs can be used interchangeably.

For ordered lists, arbitrary numbers can be used as part of an item prefix; the items will be 
renumbered during rendering. However, future implementations may demand that the number used 
for the first item in a list indicates an offset to be used for subsequent items.

For list items spanning multiple lines, subsequent lines can be indented using an arbitrary amount
of whitespace.

List items will be wrapped in HTML `<p>` tags if they are separated by blank lines.

A list item may span multiple paragraphs. At least the first line of each such paragraph must 
be indented using at least 4 spaces or a tab character.

Block quotes within list items must have their '>' delimiters indented using 4 up to 7 spaces.

Code blocks within list items need to be indented _twice_, that is, using 8 spaces or 2 tab
characters.







level.

### Block quotes

Not every line in a paragraph needs to be prefixed by '>' in order to make it a block quote,
only the first line.

Block quoted paragraphs can be nested by using multiple '>' characters as prefix.

Within a block quote, Markdown formatting (e.g. lists, emphasis) still works as normal.

### Lists

A list item prefix need not occur first on its line; up to 3 leading spaces are allowed
(4 spaces would make a code block out of the following text).

For unordered lists, asterisks, hyphens and plus signs can be used interchangeably.

For ordered lists, arbitrary numbers can be used as part of an item prefix; the items will be
renumbered during rendering. However, future implementations may demand that the number used
for the first item in a list indicates an offset to be used for subsequent items.

For list items spanning multiple lines, subsequent lines can be indented using an arbitrary amount
of whitespace.

List items will be wrapped in HTML `<p>` tags if they are separated by blank lines.

A list item may span multiple paragraphs. At least the first line of each such paragraph must
be indented using at least 4 spaces or a tab character.

Block quotes within list items must have their '>' delimiters indented using 4 up to 7 spaces.

Code blocks within list items need to be indented _twice_, that is, using 8 spaces or 2 tab
characters.
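
A brief editorial sketch (not part of the markdown.md source) showing how these indentation rules combine inside one list item:

    -   A list item; the prefix and the item text are separated by whitespace.

        A second paragraph of the same item, indented by 4 spaces.

        > A block quote inside the item, with its '>' indented 4 to 7 spaces.

            A code block inside the item, indented twice (8 spaces).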

Regular Markdown syntax is not processed within code blocks.

### Links

#### Automatic links

When rendering automatic links to email addresses, HTML encoding obfuscation is used to 
prevent some spambots from harvesting.

#### Inline links

Links to resources on the same server can use relative paths (i.e. can start with a '/').

An optional title for the link (e.g. to have mouseover text in the browser) may be given behind 
the link target but within the parentheses, in single and double quotes, and separated from the 
link target by whitespace. 

#### Reference links

> Each reference link consists of 
>
>   - one or more _link instances_ at appropriate locations in the page text
>   - a single _link definition_ at an arbitrary location on the page
> 
> During rendering, each link instance is resolved, and the corresponding definition is
> filled in. No separate link definition clauses occur in the rendered output.
> 
> There are 3 fields involved in link instances and definitions:
>
>   - link text (i.e. the text that is displayed at the resulting link)
>   - link definition name (i.e. an unique ID binding link instances to link definition)
>   - link target (a target URL for the link)

Multiple link instances may reference the same link definition using its link definition






Regular Markdown syntax is not processed within code blocks.

### Links

#### Automatic links

When rendering automatic links to email addresses, HTML encoding obfuscation is used to
prevent some spambots from harvesting.

#### Inline links

Links to resources on the same server can use relative paths (i.e. can start with a '/').

An optional title for the link (e.g. to have mouseover text in the browser) may be given behind
the link target but within the parentheses, in single and double quotes, and separated from the
link target by whitespace.

#### Reference links

> Each reference link consists of
>
>   - one or more _link instances_ at appropriate locations in the page text
>   - a single _link definition_ at an arbitrary location on the page
>
> During rendering, each link instance is resolved, and the corresponding definition is
> filled in. No separate link definition clauses occur in the rendered output.
>
> There are 3 fields involved in link instances and definitions:
>
>   - link text (i.e. the text that is displayed at the resulting link)
>   - link definition name (i.e. a unique ID binding link instances to a link definition)
>   - link target (a target URL for the link)

Multiple link instances may reference the same link definition using its link definition name.
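
A minimal sketch (the definition name and target here are invented):

    See the [project website][home] for details; a second instance, [the same site][home],
    reuses the same definition.

    [home]: https://www.fossil-scm.org/ "An optional title"
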
side of emphasis start or end punctuation characters.

### Code spans

To include a literal backtick character in a code span, use multiple backticks as opening and
closing delimiters.

Whitespace may exist immediately after the opening delimiter and before the closing delimiter
of a code span, to allow for code fragments starting or ending with a backtick.

Within a code span - like within a code block - angle brackets and ampersands are automatically encoded to make including
HTML fragments easier.
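
For example (illustrative only), a code span containing a literal backtick can be written with
double-backtick delimiters, and markup inside a code span is encoded rather than interpreted:

    A backtick in a code span: `` `foo` ``, and encoded markup: `<td>`.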

### Images

If necessary, HTML must be used to specify image dimensions. Markdown has no provision for this.
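
For example (the file name and sizes are made up), an image with explicit dimensions has to be
written as plain HTML:

    <img src="diagram.png" width="400" height="300" alt="diagram">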

### Inline HTML

Start and end tags of
an HTML block level construct (`<div>`, `<table>`, etc.) must be separated from the surrounding
context using blank lines, and must both occur at the start of a line.

No extra unwanted `<p>` HTML tags are added around HTML block level tags.

Markdown formatting within HTML block level tags is not processed; however, formatting within
span level tags (e.g. `<mark>`) is processed normally.
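
A short sketch of both cases (the markup is made up): a block level `<table>` separated from the
surrounding text by blank lines, and a span level `<mark>` used inline:

    Some Markdown text.

    <table>
      <tr><td>*no Markdown emphasis is applied here*</td></tr>
    </table>

    More Markdown text, where <mark>*emphasis*</mark> is still applied.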

### Escaping Markdown punctuation

The following punctuation characters can be escaped using backslash:

  - \\   backslash
Changes to src/markdown_html.c.
static void html_epilog(struct Blob *ob, void *opaque){
  INTER_BLOCK(ob);
  BLOB_APPEND_LITERAL(ob, "</div>\n");
}

static void html_raw_block(struct Blob *ob, struct Blob *text, void *opaque){
  char *data = blob_buffer(text);
  size_t first = 0, size = blob_size(text);
  INTER_BLOCK(ob);
  while( first<size && data[first]=='\n' ) first++;
  while( size>first && data[size-1]=='\n' ) size--;
  blob_append(ob, data+first, size-first);
  BLOB_APPEND_LITERAL(ob, "\n");
}

static void html_blockcode(struct Blob *ob, struct Blob *text, void *opaque){
  INTER_BLOCK(ob);
  BLOB_APPEND_LITERAL(ob, "<pre><code>");
  html_escape(ob, blob_buffer(text), blob_size(text));






static void html_epilog(struct Blob *ob, void *opaque){
  INTER_BLOCK(ob);
  BLOB_APPEND_LITERAL(ob, "</div>\n");
}

static void html_raw_block(struct Blob *ob, struct Blob *text, void *opaque){
  char *data = blob_buffer(text);
  size_t size = blob_size(text);
  Blob *title = (Blob*)opaque;
  while( size>0 && fossil_isspace(data[0]) ){ data++; size--; }
  while( size>0 && fossil_isspace(data[size-1]) ){ size--; }
  /* If the first raw block is an <h1> element, then use it as the title. */
  if( blob_size(ob)<=PROLOG_SIZE
   && size>9
   && title!=0
   && sqlite3_strnicmp("<h1",data,3)==0
   && sqlite3_strnicmp("</h1>", &data[size-5],5)==0
  ){
    int nTag = htmlTagLength(data);
    blob_append(title, data+nTag, size - nTag - 5);
    return;
  }
  INTER_BLOCK(ob);


  blob_append(ob, data, size);
  BLOB_APPEND_LITERAL(ob, "\n");
}

static void html_blockcode(struct Blob *ob, struct Blob *text, void *opaque){
  INTER_BLOCK(ob);
  BLOB_APPEND_LITERAL(ob, "<pre><code>");
  html_escape(ob, blob_buffer(text), blob_size(text));
  struct Blob *text,
  int level,
  void *opaque
){
  struct Blob *title = opaque;
  /* The first header at the beginning of a text is considered as
   * a title and not output. */
  if( blob_size(ob)<=PROLOG_SIZE && blob_size(title)==0 ){
    BLOB_APPEND_BLOB(title, text);

  }
  INTER_BLOCK(ob);
  blob_appendf(ob, "<h%d>", level);
  BLOB_APPEND_BLOB(ob, text);
  blob_appendf(ob, "</h%d>", level);
}







  struct Blob *text,
  int level,
  void *opaque
){
  struct Blob *title = opaque;
  /* The first header at the beginning of a text is considered as
   * a title and not output. */
  if( blob_size(ob)<=PROLOG_SIZE && title!=0 && blob_size(title)==0 ){
    BLOB_APPEND_BLOB(title, text);
    return;
  }
  INTER_BLOCK(ob);
  blob_appendf(ob, "<h%d>", level);
  BLOB_APPEND_BLOB(ob, text);
  blob_appendf(ob, "</h%d>", level);
}

}



/* HTML span tags */

static int html_raw_span(struct Blob *ob, struct Blob *text, void *opaque){

  BLOB_APPEND_BLOB(ob, text);
  return 1;
}

static int html_autolink(
  struct Blob *ob,
  struct Blob *link,






}



/* HTML span tags */

static int html_raw_span(struct Blob *ob, struct Blob *text, void *opaque){
  /* If the document begins with a <h1> markup, take that as the header. */
  BLOB_APPEND_BLOB(ob, text);
  return 1;
}

static int html_autolink(
  struct Blob *ob,
  struct Blob *link,
}


static void html_normal_text(struct Blob *ob, struct Blob *text, void *opaque){
  html_escape(ob, blob_buffer(text), blob_size(text));
}







void markdown_to_html(
  struct Blob *input_markdown,
  struct Blob *output_title,
  struct Blob *output_body
){
  struct mkd_renderer html_renderer = {
    /* prolog and epilog */
    html_prolog,
    html_epilog,

    /* block level elements */






}


static void html_normal_text(struct Blob *ob, struct Blob *text, void *opaque){
  html_escape(ob, blob_buffer(text), blob_size(text));
}

/*
** Convert markdown into HTML.
**
** The document title is placed in output_title if not NULL.  Or if
** output_title is NULL, the document title appears in the body.
*/
void markdown_to_html(
  struct Blob *input_markdown,   /* Markdown content to be rendered */
  struct Blob *output_title,     /* Put title here.  May be NULL */
  struct Blob *output_body       /* Put document body here. */
){
  struct mkd_renderer html_renderer = {
    /* prolog and epilog */
    html_prolog,
    html_epilog,

    /* block level elements */
    /* misc. parameters */
    64, /* maximum stack */
    "*_", /* emphasis characters */
    0 /* opaque data */
  };
  html_renderer.opaque = output_title;
  blob_reset(output_title);
  blob_reset(output_body);
  markdown(output_body, input_markdown, &html_renderer);
}






    /* misc. parameters */
    64, /* maximum stack */
    "*_", /* emphasis characters */
    0 /* opaque data */
  };
  html_renderer.opaque = output_title;
  if( output_title ) blob_reset(output_title);
  blob_reset(output_body);
  markdown(output_body, input_markdown, &html_renderer);
}
Changes to src/merge.c.
/*
** Print information about a particular check-in.
*/
void print_checkin_description(int rid, int indent, const char *zLabel){
  Stmt q;
  db_prepare(&q,
     "SELECT datetime(mtime%s),"
     "       coalesce(euser,user), coalesce(ecomment,comment),"
     "       (SELECT uuid FROM blob WHERE rid=%d),"
     "       (SELECT group_concat(substr(tagname,5), ', ') FROM tag, tagxref"
     "         WHERE tagname GLOB 'sym-*' AND tag.tagid=tagxref.tagid"
     "           AND tagxref.rid=%d AND tagxref.tagtype>0)"
     "  FROM event WHERE objid=%d", timeline_utc(), rid, rid, rid);
  if( db_step(&q)==SQLITE_ROW ){
    const char *zTagList = db_column_text(&q, 4);
    char *zCom;
    if( zTagList && zTagList[0] ){
      zCom = mprintf("%s (%s)", db_column_text(&q, 2), zTagList);
    }else{
      zCom = mprintf("%s", db_column_text(&q,2));






/*
** Print information about a particular check-in.
*/
void print_checkin_description(int rid, int indent, const char *zLabel){
  Stmt q;
  db_prepare(&q,
     "SELECT datetime(mtime,toLocal()),"
     "       coalesce(euser,user), coalesce(ecomment,comment),"
     "       (SELECT uuid FROM blob WHERE rid=%d),"
     "       (SELECT group_concat(substr(tagname,5), ', ') FROM tag, tagxref"
     "         WHERE tagname GLOB 'sym-*' AND tag.tagid=tagxref.tagid"
     "           AND tagxref.rid=%d AND tagxref.tagtype>0)"
     "  FROM event WHERE objid=%d", rid, rid, rid);
  if( db_step(&q)==SQLITE_ROW ){
    const char *zTagList = db_column_text(&q, 4);
    char *zCom;
    if( zTagList && zTagList[0] ){
      zCom = mprintf("%s (%s)", db_column_text(&q, 2), zTagList);
    }else{
      zCom = mprintf("%s", db_column_text(&q,2));
  verify_all_options();
  db_must_be_within_tree();
  if( zBinGlob==0 ) zBinGlob = db_get("binary-glob",0);
  vid = db_lget_int("checkout", 0);
  if( vid==0 ){
    fossil_fatal("nothing is checked out");
  }







  /* Find mid, the artifactID of the version to be merged into the current
  ** check-out */
  if( g.argc==3 ){
    /* Mid is specified as an argument on the command-line */
    mid = name_to_typed_rid(g.argv[2], "ci");
    if( mid==0 || !is_a_version(mid) ){






  verify_all_options();
  db_must_be_within_tree();
  if( zBinGlob==0 ) zBinGlob = db_get("binary-glob",0);
  vid = db_lget_int("checkout", 0);
  if( vid==0 ){
    fossil_fatal("nothing is checked out");
  }
  if( !dryRunFlag ){
    if( autosync_loop(SYNC_PULL + SYNC_VERBOSE*verboseFlag,
                      db_get_int("autosync-tries", 1)) ){
      fossil_fatal("Cannot proceed with merge");
    }
  }

  /* Find mid, the artifactID of the version to be merged into the current
  ** check-out */
  if( g.argc==3 ){
    /* Mid is specified as an argument on the command-line */
    mid = name_to_typed_rid(g.argv[2], "ci");
    if( mid==0 || !is_a_version(mid) ){
        db_text(0, "SELECT value FROM tagxref"
                   " WHERE tagid=%d AND rid=%d AND tagtype>0",
                   TAG_BRANCH, vid)
      );
    }
    db_prepare(&q,
      "SELECT blob.uuid,"
          "   datetime(event.mtime%s),"
          "   coalesce(ecomment, comment),"
          "   coalesce(euser, user)"
      "  FROM event, blob"
      " WHERE event.objid=%d AND blob.rid=%d",
      timeline_utc(), mid, mid
    );
    if( db_step(&q)==SQLITE_ROW ){
      char *zCom = mprintf("Merging fork [%S] at %s by %s: \"%s\"",
            db_column_text(&q, 0), db_column_text(&q, 1),
            db_column_text(&q, 3), db_column_text(&q, 2));
      comment_print(zCom, db_column_text(&q,2), 0, -1, g.comFmtFlags);
      fossil_free(zCom);






        db_text(0, "SELECT value FROM tagxref"
                   " WHERE tagid=%d AND rid=%d AND tagtype>0",
                   TAG_BRANCH, vid)
      );
    }
    db_prepare(&q,
      "SELECT blob.uuid,"
          "   datetime(event.mtime,toLocal()),"
          "   coalesce(ecomment, comment),"
          "   coalesce(euser, user)"
      "  FROM event, blob"
      " WHERE event.objid=%d AND blob.rid=%d",
      mid, mid
    );
    if( db_step(&q)==SQLITE_ROW ){
      char *zCom = mprintf("Merging fork [%S] at %s by %s: \"%s\"",
            db_column_text(&q, 0), db_column_text(&q, 1),
            db_column_text(&q, 3), db_column_text(&q, 2));
      comment_print(zCom, db_column_text(&q,2), 0, -1, g.comFmtFlags);
      fossil_free(zCom);
Changes to src/name.c.
    if( rid ) return rid;
  }

  /* Date and times */
  if( memcmp(zTag, "date:", 5)==0 ){
    rid = db_int(0,
      "SELECT objid FROM event"
      " WHERE mtime<=julianday(%Q,'utc') AND type GLOB '%q'"
      " ORDER BY mtime DESC LIMIT 1",
      &zTag[5], zType);
    return rid;
  }
  if( fossil_isdate(zTag) ){
    rid = db_int(0,
      "SELECT objid FROM event"
      " WHERE mtime<=julianday(%Q,'utc') AND type GLOB '%q'"
      " ORDER BY mtime DESC LIMIT 1",
      zTag, zType);
    if( rid) return rid;
  }

  /* Deprecated date & time formats:   "local:" + date-time and
  ** "utc:" + date-time */






    if( rid ) return rid;
  }

  /* Date and times */
  if( memcmp(zTag, "date:", 5)==0 ){
    rid = db_int(0,
      "SELECT objid FROM event"
      " WHERE mtime<=julianday(%Q,fromLocal()) AND type GLOB '%q'"
      " ORDER BY mtime DESC LIMIT 1",
      &zTag[5], zType);
    return rid;
  }
  if( fossil_isdate(zTag) ){
    rid = db_int(0,
      "SELECT objid FROM event"
      " WHERE mtime<=julianday(%Q,fromLocal()) AND type GLOB '%q'"
      " ORDER BY mtime DESC LIMIT 1",
      zTag, zType);
    if( rid) return rid;
  }

  /* Deprecated date & time formats:   "local:" + date-time and
  ** "utc:" + date-time */
*/
void whatis_rid(int rid, int verboseFlag){
  Stmt q;
  int cnt;

  /* Basic information about the object. */
  db_prepare(&q,
     "SELECT uuid, size, datetime(mtime%s), ipaddr"
     "  FROM blob, rcvfrom"
     " WHERE rid=%d"
     "   AND rcvfrom.rcvid=blob.rcvid",
     timeline_utc(), rid);
  if( db_step(&q)==SQLITE_ROW ){
    if( verboseFlag ){
      fossil_print("artifact:   %s (%d)\n", db_column_text(&q,0), rid);
      fossil_print("size:       %d bytes\n", db_column_int(&q,1));
      fossil_print("received:   %s from %s\n",
         db_column_text(&q, 2),
         db_column_text(&q, 3));






*/
void whatis_rid(int rid, int verboseFlag){
  Stmt q;
  int cnt;

  /* Basic information about the object. */
  db_prepare(&q,
     "SELECT uuid, size, datetime(mtime,toLocal()), ipaddr"
     "  FROM blob, rcvfrom"
     " WHERE rid=%d"
     "   AND rcvfrom.rcvid=blob.rcvid",
     rid);
  if( db_step(&q)==SQLITE_ROW ){
    if( verboseFlag ){
      fossil_print("artifact:   %s (%d)\n", db_column_text(&q,0), rid);
      fossil_print("size:       %d bytes\n", db_column_int(&q,1));
      fossil_print("received:   %s from %s\n",
         db_column_text(&q, 2),
         db_column_text(&q, 3));
    fossil_print("%s%s", zPrefix, db_column_text(&q,0));
  }
  if( cnt ) fossil_print("\n");
  db_finalize(&q);

  /* Check for entries on the timeline that reference this object */
  db_prepare(&q,
     "SELECT type, datetime(mtime%s),"
     "       coalesce(euser,user), coalesce(ecomment,comment)"
     "  FROM event WHERE objid=%d", timeline_utc(), rid);
  if( db_step(&q)==SQLITE_ROW ){
    const char *zType;
    switch( db_column_text(&q,0)[0] ){
      case 'c':  zType = "Check-in";       break;
      case 'w':  zType = "Wiki-edit";      break;
      case 'e':  zType = "Event";          break;
      case 't':  zType = "Ticket-change";  break;
      case 'g':  zType = "Tag-change";     break;
      default:   zType = "Unknown";        break;
    }
    fossil_print("type:       %s by %s on %s\n", zType, db_column_text(&q,2),
                 db_column_text(&q, 1));
    fossil_print("comment:    ");
    comment_print(db_column_text(&q,3), 0, 12, -1, g.comFmtFlags);
  }
  db_finalize(&q);

  /* Check to see if this object is used as a file in a check-in */
  db_prepare(&q,
    "SELECT filename.name, blob.uuid, datetime(event.mtime%s),"
    "       coalesce(euser,user), coalesce(ecomment,comment)"
    "  FROM mlink, filename, blob, event"
    " WHERE mlink.fid=%d"
    "   AND filename.fnid=mlink.fnid"
    "   AND event.objid=mlink.mid"
    "   AND blob.rid=mlink.mid"
    " ORDER BY event.mtime DESC /*sort*/",
    timeline_utc(), rid);
  while( db_step(&q)==SQLITE_ROW ){
    fossil_print("file:       %s\n", db_column_text(&q,0));
    fossil_print("            part of [%S] by %s on %s\n",
      db_column_text(&q, 1),
      db_column_text(&q, 3),
      db_column_text(&q, 2));
    fossil_print("            ");
    comment_print(db_column_text(&q,4), 0, 12, -1, g.comFmtFlags);
  }
  db_finalize(&q);

  /* Check to see if this object is used as an attachment */
  db_prepare(&q,
    "SELECT attachment.filename,"
    "       attachment.comment,"
    "       attachment.user,"
    "       datetime(attachment.mtime%s),"
    "       attachment.target,"
    "       CASE WHEN EXISTS(SELECT 1 FROM tag WHERE tagname=('tkt-'||target))"
    "            THEN 'ticket'"
    "       WHEN EXISTS(SELECT 1 FROM tag WHERE tagname=('wiki-'||target))"
    "            THEN 'wiki' END,"
    "       attachment.attachid,"
    "       (SELECT uuid FROM blob WHERE rid=attachid)"
    "  FROM attachment JOIN blob ON attachment.src=blob.uuid"
    " WHERE blob.rid=%d",
    timeline_utc(), rid
  );
  while( db_step(&q)==SQLITE_ROW ){
    fossil_print("attachment: %s\n", db_column_text(&q,0));
    fossil_print("            attached to %s %s\n",
                 db_column_text(&q,5), db_column_text(&q,4));
    if( verboseFlag ){
      fossil_print("            via %s (%d)\n",






    fossil_print("%s%s", zPrefix, db_column_text(&q,0));
  }
  if( cnt ) fossil_print("\n");
  db_finalize(&q);

  /* Check for entries on the timeline that reference this object */
  db_prepare(&q,
     "SELECT type, datetime(mtime,toLocal()),"
     "       coalesce(euser,user), coalesce(ecomment,comment)"
     "  FROM event WHERE objid=%d", rid);
  if( db_step(&q)==SQLITE_ROW ){
    const char *zType;
    switch( db_column_text(&q,0)[0] ){
      case 'c':  zType = "Check-in";       break;
      case 'w':  zType = "Wiki-edit";      break;
      case 'e':  zType = "Event";          break;
      case 't':  zType = "Ticket-change";  break;
      case 'g':  zType = "Tag-change";     break;
      default:   zType = "Unknown";        break;
    }
    fossil_print("type:       %s by %s on %s\n", zType, db_column_text(&q,2),
                 db_column_text(&q, 1));
    fossil_print("comment:    ");
    comment_print(db_column_text(&q,3), 0, 12, -1, g.comFmtFlags);
  }
  db_finalize(&q);

  /* Check to see if this object is used as a file in a check-in */
  db_prepare(&q,
    "SELECT filename.name, blob.uuid, datetime(event.mtime,toLocal()),"
    "       coalesce(euser,user), coalesce(ecomment,comment)"
    "  FROM mlink, filename, blob, event"
    " WHERE mlink.fid=%d"
    "   AND filename.fnid=mlink.fnid"
    "   AND event.objid=mlink.mid"
    "   AND blob.rid=mlink.mid"
    " ORDER BY event.mtime DESC /*sort*/",
    rid);
  while( db_step(&q)==SQLITE_ROW ){
    fossil_print("file:       %s\n", db_column_text(&q,0));
    fossil_print("            part of [%S] by %s on %s\n",
      db_column_text(&q, 1),
      db_column_text(&q, 3),
      db_column_text(&q, 2));
    fossil_print("            ");
    comment_print(db_column_text(&q,4), 0, 12, -1, g.comFmtFlags);
  }
  db_finalize(&q);

  /* Check to see if this object is used as an attachment */
  db_prepare(&q,
    "SELECT attachment.filename,"
    "       attachment.comment,"
    "       attachment.user,"
    "       datetime(attachment.mtime,toLocal()),"
    "       attachment.target,"
    "       CASE WHEN EXISTS(SELECT 1 FROM tag WHERE tagname=('tkt-'||target))"
    "            THEN 'ticket'"
    "       WHEN EXISTS(SELECT 1 FROM tag WHERE tagname=('wiki-'||target))"
    "            THEN 'wiki' END,"
    "       attachment.attachid,"
    "       (SELECT uuid FROM blob WHERE rid=attachid)"
    "  FROM attachment JOIN blob ON attachment.src=blob.uuid"
    " WHERE blob.rid=%d",
    rid
  );
  while( db_step(&q)==SQLITE_ROW ){
    fossil_print("attachment: %s\n", db_column_text(&q,0));
    fossil_print("            attached to %s %s\n",
                 db_column_text(&q,5), db_column_text(&q,4));
    if( verboseFlag ){
      fossil_print("            via %s (%d)\n",
/*
** COMMAND: whatis*
** Usage: %fossil whatis NAME
**
** Resolve the symbol NAME into its canonical 40-character SHA1-hash
** artifact name and provide a description of what role that artifact
** plays.
*/
void whatis_cmd(void){
  int rid;
  const char *zName;
  int verboseFlag;
  int i;
  const char *zType = 0;






/*
** COMMAND: whatis*
** Usage: %fossil whatis NAME
**
** Resolve the symbol NAME into its canonical 40-character SHA1-hash
** artifact name and provide a description of what role that artifact
** plays.
**
** Options:
**
**    --type TYPE          Only find artifacts of TYPE (one of: 'ci', 't',
**                         'w', 'g', or 'e').
**    -v|--verbose         Provide extra information (such as the RID)
*/
void whatis_cmd(void){
  int rid;
  const char *zName;
  int verboseFlag;
  int i;
  const char *zType = 0;
    "   AND tagxref.srcid=blob.rid;",
    zWhere /*safe-for-%s*/
  );

  /* Cluster artifacts */
  db_multi_exec(
    "INSERT OR IGNORE INTO description(rid,uuid,ctime,type,summary)\n"
    "SELECT blob.rid, blob.uuid, tagxref.mtime, 'cluster', 'cluster'\n"
    "  FROM tagxref, blob\n"
    " WHERE (tagxref.rid %s)\n"
    "   AND tagxref.tagid=(SELECT tagid FROM tag WHERE tagname='cluster')\n"
    "   AND blob.rid=tagxref.rid;",

    zWhere /*safe-for-%s*/
  );

  /* Ticket change artifacts */
  db_multi_exec(
    "INSERT OR IGNORE INTO description(rid,uuid,ctime,type,summary)\n"
    "SELECT blob.rid, blob.uuid, tagxref.mtime, 'ticket',\n"






    "   AND tagxref.srcid=blob.rid;",
    zWhere /*safe-for-%s*/
  );

  /* Cluster artifacts */
  db_multi_exec(
    "INSERT OR IGNORE INTO description(rid,uuid,ctime,type,summary)\n"
    "SELECT blob.rid, blob.uuid, rcvfrom.mtime, 'cluster', 'cluster'\n"
    "  FROM tagxref, blob, rcvfrom\n"
    " WHERE (tagxref.rid %s)\n"
    "   AND tagxref.tagid=(SELECT tagid FROM tag WHERE tagname='cluster')\n"
    "   AND blob.rid=tagxref.rid"
    "   AND rcvfrom.rcvid=blob.rcvid;",
    zWhere /*safe-for-%s*/
  );

  /* Ticket change artifacts */
  db_multi_exec(
    "INSERT OR IGNORE INTO description(rid,uuid,ctime,type,summary)\n"
    "SELECT blob.rid, blob.uuid, tagxref.mtime, 'ticket',\n"
/*
** WEBPAGE: bloblist
**
** Return a page showing all artifacts in the repository.  Query parameters:
**
**   n=N         Show N artifacts
**   s=S         Start with artifact number S

*/
void bloblist_page(void){
  Stmt q;
  int s = atoi(PD("s","0"));
  int n = atoi(PD("n","5000"));
  int mx = db_int(0, "SELECT max(rid) FROM blob");

  char *zRange;

  login_check_credentials();
  if( !g.perm.Read ){ login_needed(g.anon.Read); return; }
  style_header("List Of Artifacts");

  if( mx>n && P("s")==0 ){
    int i;
    @ <p>Select a range of artifacts to view:</p>
    @ <ul>
    for(i=1; i<=mx; i+=n){
      @ <li> %z(href("%R/bloblist?s=%d&n=%d",i,n))
      @ %d(i)..%d(i+n-1<mx?i+n-1:mx)</a>
    }
    @ </ul>
    style_footer();
    return;
  }
  if( mx>n ){
    style_submenu_element("Index", "Index", "bloblist");
  }



  zRange = mprintf("BETWEEN %d AND %d", s, s+n-1);

  describe_artifacts(zRange);

  db_prepare(&q,
    "SELECT rid, uuid, summary, isPrivate FROM description ORDER BY rid"
  );
  @ <table cellpadding="0" cellspacing="0">
  while( db_step(&q)==SQLITE_ROW ){
    int rid = db_column_int(&q,0);
    const char *zUuid = db_column_text(&q, 1);
    const char *zDesc = db_column_text(&q, 2);
    int isPriv = db_column_int(&q,2);
    @ <tr><td align="right">%d(rid)</td>
    @ <td>&nbsp;%z(href("%R/info/%!S",zUuid))%s(zUuid)</a>&nbsp;</td>
    @ <td align="left">%h(zDesc)</td>
    if( isPriv ){
      @ <td>(unpublished)</td>
    }
    @ </tr>
  }
  @ </table>
  db_finalize(&q);
  style_footer();
}

/*
** COMMAND: test-unsent
**
** Usage: %fossil test-unsent
**
** Show all artifacts in the unsent table






/*
** WEBPAGE: bloblist
**
** Return a page showing all artifacts in the repository.  Query parameters:
**
**   n=N         Show N artifacts
**   s=S         Start with artifact number S
**   unpub       Show only unpublished artifacts
*/
void bloblist_page(void){
  Stmt q;
  int s = atoi(PD("s","0"));
  int n = atoi(PD("n","5000"));
  int mx = db_int(0, "SELECT max(rid) FROM blob");
  int unpubOnly = PB("unpub");
  char *zRange;

  login_check_credentials();
  if( !g.perm.Read ){ login_needed(g.anon.Read); return; }
  style_header("List Of Artifacts");
  style_submenu_element("250 Largest", 0, "bigbloblist");
  if( !unpubOnly && mx>n && P("s")==0 ){
    int i;
    @ <p>Select a range of artifacts to view:</p>
    @ <ul>
    for(i=1; i<=mx; i+=n){
      @ <li> %z(href("%R/bloblist?s=%d&n=%d",i,n))
      @ %d(i)..%d(i+n-1<mx?i+n-1:mx)</a>
    }
    @ </ul>
    style_footer();
    return;
  }
  if( !unpubOnly && mx>n ){
    style_submenu_element("Index", "Index", "bloblist");
  }
  if( unpubOnly ){
    zRange = mprintf("IN private");
  }else{
    zRange = mprintf("BETWEEN %d AND %d", s, s+n-1);
  }
  describe_artifacts(zRange);
  fossil_free(zRange);
  db_prepare(&q,
    "SELECT rid, uuid, summary, isPrivate FROM description ORDER BY rid"
  );
  @ <table cellpadding="0" cellspacing="0">
  while( db_step(&q)==SQLITE_ROW ){
    int rid = db_column_int(&q,0);
    const char *zUuid = db_column_text(&q, 1);
    const char *zDesc = db_column_text(&q, 2);
    int isPriv = db_column_int(&q,3);
    @ <tr><td align="right">%d(rid)</td>
    @ <td>&nbsp;%z(href("%R/info/%!S",zUuid))%S(zUuid)</a>&nbsp;</td>
    @ <td align="left">%h(zDesc)</td>
    if( isPriv ){
      @ <td>(unpublished)</td>
    }
    @ </tr>
  }
  @ </table>
  db_finalize(&q);
  style_footer();
}

/*
** WEBPAGE: bigbloblist
**
** Return a page showing the largest artifacts in the repository in order
** of decreasing size.
**
**   n=N         Show the top N artifacts
*/
void bigbloblist_page(void){
  Stmt q;
  int n = atoi(PD("n","250"));

  login_check_credentials();
  if( !g.perm.Read ){ login_needed(g.anon.Read); return; }
  style_header("%d Largest Artifacts", n);
  db_multi_exec(
    "CREATE TEMP TABLE toshow(rid INTEGER PRIMARY KEY);"
    "INSERT INTO toshow(rid)"
    "  SELECT rid FROM blob"
    "   ORDER BY length(content) DESC"
    "   LIMIT %d;", n
  );
  describe_artifacts("IN toshow");
  db_prepare(&q,
    "SELECT description.rid, description.uuid, description.summary,"
    "       length(blob.content), coalesce(delta.srcid,''),"
    "       datetime(description.ctime)"
    "  FROM description, blob LEFT JOIN delta ON delta.rid=blob.rid"
    " WHERE description.rid=blob.rid"
    " ORDER BY length(content) DESC"
  );
  @ <table cellpadding="2" cellspacing="0" border="1" id="bigblobtab">
  @ <thead><tr><th align="right">Size<th align="right">RID
  @ <th align="right">Delta From<th>SHA1<th>Description<th>Date</tr></thead>
  @ <tbody>
  while( db_step(&q)==SQLITE_ROW ){
    int rid = db_column_int(&q,0);
    const char *zUuid = db_column_text(&q, 1);
    const char *zDesc = db_column_text(&q, 2);
    int sz = db_column_int(&q,3);
    const char *zSrcId = db_column_text(&q,4);
    const char *zDate = db_column_text(&q,5);
    @ <tr><td align="right">%d(sz)</td>
    @ <td align="right">%d(rid)</td>
    @ <td align="right">%s(zSrcId)</td>
    @ <td>&nbsp;%z(href("%R/info/%!S",zUuid))%S(zUuid)</a>&nbsp;</td>
    @ <td align="left">%h(zDesc)</td>
    @ <td align="left">%z(href("%R/timeline?c=%T",zDate))%s(zDate)</a></td>
    @ </tr>
  }
  @ </tbody></table>
  db_finalize(&q);
  output_table_sorting_javascript("bigblobtab", "NnnttT", -1);
  style_footer();
}

/*
** COMMAND: test-unsent
**
** Usage: %fossil test-unsent
**
** Show all artifacts in the unsent table
Changes to src/printf.c.


/*
** The root program.  All variations call this core.
**
** INPUTS:
**   func   This is a pointer to a function taking three arguments
**            1. A pointer to anything.  Same as the "arg" parameter.
**            2. A pointer to the list of characters to be output
**               (Note, this list is NOT null terminated.)
**            3. An integer number of characters to be output.
**               (Note: This number might be zero.)
**
**   arg    This is the pointer to anything which will be passed as the
**          first argument to "func".  Use it for whatever you like.
**
**   fmt    This is the format string, as in the usual print.
**
**   ap     This is a pointer to a list of arguments.  Same as in
**          vfprint.
**
** OUTPUTS:








/*
** The root program.  All variations call this core.
**
** INPUTS:
**   pBlob  This is the blob where the output will be built.

**
**   fmt    This is the format string, as in the usual print.
**
**   ap     This is a pointer to a list of arguments.  Same as in
**          vfprint.
**
** OUTPUTS:
  vfprintf(out, zFormat, ap);
  fprintf(out, "\n");
  va_end(ap);
  for(i=0; i<sizeof(azEnv)/sizeof(azEnv[0]); i++){
    char *p;
    if( (p = fossil_getenv(azEnv[i]))!=0 ){
      fprintf(out, "%s=%s\n", azEnv[i], p);
      fossil_filename_free(p);
    }else if( (z = P(azEnv[i]))!=0 ){
      fprintf(out, "%s=%s\n", azEnv[i], z);
    }
  }
  fclose(out);
}







  vfprintf(out, zFormat, ap);
  fprintf(out, "\n");
  va_end(ap);
  for(i=0; i<sizeof(azEnv)/sizeof(azEnv[0]); i++){
    char *p;
    if( (p = fossil_getenv(azEnv[i]))!=0 ){
      fprintf(out, "%s=%s\n", azEnv[i], p);
      fossil_path_free(p);
    }else if( (z = P(azEnv[i]))!=0 ){
      fprintf(out, "%s=%s\n", azEnv[i], z);
    }
  }
  fclose(out);
}

Changes to src/purge.c.
      blob_reset(&content);
    }
  /* The "checkins" subcommand goes here in alphabetical order, but it must
  ** be moved to the end since it is the default case */
  }else if( strncmp(zSubcmd, "list", n)==0 || strcmp(zSubcmd,"ls")==0 ){
    int showDetail = find_option("l","l",0)!=0;
    if( !db_table_exists("repository","purgeevent") ) return;
    db_prepare(&q, "SELECT peid, datetime(ctime,'unixepoch','localtime')"
                   " FROM purgeevent");
    while( db_step(&q)==SQLITE_ROW ){
      fossil_print("%4d on %s\n", db_column_int(&q,0), db_column_text(&q,1));
      if( showDetail ){
        purge_list_event_content(db_column_int(&q,0));
      }
    }






      blob_reset(&content);
    }
  /* The "checkins" subcommand goes here in alphabetical order, but it must
  ** be moved to the end since it is the default case */
  }else if( strncmp(zSubcmd, "list", n)==0 || strcmp(zSubcmd,"ls")==0 ){
    int showDetail = find_option("l","l",0)!=0;
    if( !db_table_exists("repository","purgeevent") ) return;
    db_prepare(&q, "SELECT peid, datetime(ctime,'unixepoch',toLocal())"
                   " FROM purgeevent");
    while( db_step(&q)==SQLITE_ROW ){
      fossil_print("%4d on %s\n", db_column_int(&q,0), db_column_text(&q,1));
      if( showDetail ){
        purge_list_event_content(db_column_int(&q,0));
      }
    }
Changes to src/rebuild.c.
**   --deanalyze       Remove ANALYZE tables from the database
**   --force           Force the rebuild to complete even if errors are seen
**   --ifneeded        Only do the rebuild if it would change the schema version
**   --index           Always add in the full-text search index
**   --noverify        Skip the verification of changes to the BLOB table
**   --noindex         Always omit the full-text search index
**   --pagesize N      Set the database pagesize to N. (512..65536 and power of 2)

**   --randomize       Scan artifacts in a random order
**   --stats           Show artifact statistics after rebuilding
**   --vacuum          Run VACUUM on the database after rebuilding
**   --wal             Set Write-Ahead-Log journalling mode on the database
**
** See also: deconstruct, reconstruct
*/






**   --deanalyze       Remove ANALYZE tables from the database
**   --force           Force the rebuild to complete even if errors are seen
**   --ifneeded        Only do the rebuild if it would change the schema version
**   --index           Always add in the full-text search index
**   --noverify        Skip the verification of changes to the BLOB table
**   --noindex         Always omit the full-text search index
**   --pagesize N      Set the database pagesize to N. (512..65536 and power of 2)
**   --quiet           Only show output if there are errors
**   --randomize       Scan artifacts in a random order
**   --stats           Show artifact statistics after rebuilding
**   --vacuum          Run VACUUM on the database after rebuilding
**   --wal             Set Write-Ahead-Log journalling mode on the database
**
** See also: deconstruct, reconstruct
*/
  DIR *d;
  struct dirent *pEntry;
  Blob aContent; /* content of the just read artifact */
  static int nFileRead = 0;
  void *zUnicodePath;
  char *zUtf8Name;

  zUnicodePath = fossil_utf8_to_filename(zPath);
  d = opendir(zUnicodePath);
  if( d ){
    while( (pEntry=readdir(d))!=0 ){
      Blob path;
      char *zSubpath;

      if( pEntry->d_name[0]=='.' ){
        continue;
      }
      zUtf8Name = fossil_filename_to_utf8(pEntry->d_name);
      zSubpath = mprintf("%s/%s", zPath, zUtf8Name);
      fossil_filename_free(zUtf8Name);
#ifdef _DIRENT_HAVE_D_TYPE
      if( (pEntry->d_type==DT_UNKNOWN || pEntry->d_type==DT_LNK)
          ? (file_isdir(zSubpath)==1) : (pEntry->d_type==DT_DIR) )
#else
      if( file_isdir(zSubpath)==1 )
#endif
      {






  DIR *d;
  struct dirent *pEntry;
  Blob aContent; /* content of the just read artifact */
  static int nFileRead = 0;
  void *zUnicodePath;
  char *zUtf8Name;

  zUnicodePath = fossil_utf8_to_path(zPath, 1);
  d = opendir(zUnicodePath);
  if( d ){
    while( (pEntry=readdir(d))!=0 ){
      Blob path;
      char *zSubpath;

      if( pEntry->d_name[0]=='.' ){
        continue;
      }
      zUtf8Name = fossil_path_to_utf8(pEntry->d_name);
      zSubpath = mprintf("%s/%s", zPath, zUtf8Name);
      fossil_path_free(zUtf8Name);
#ifdef _DIRENT_HAVE_D_TYPE
      if( (pEntry->d_type==DT_UNKNOWN || pEntry->d_type==DT_LNK)
          ? (file_isdir(zSubpath)==1) : (pEntry->d_type==DT_DIR) )
#else
      if( file_isdir(zSubpath)==1 )
#endif
      {
      free(zSubpath);
    }
    closedir(d);
  }else {
    fossil_fatal("encountered error %d while trying to open \"%s\".",
                  errno, g.argv[3]);
  }
  fossil_filename_free(zUnicodePath);
}

/*
** COMMAND: reconstruct*
**
** Usage: %fossil reconstruct FILENAME DIRECTORY
**






      free(zSubpath);
    }
    closedir(d);
  }else {
    fossil_fatal("encountered error %d while trying to open \"%s\".",
                  errno, g.argv[3]);
  }
  fossil_path_free(zUnicodePath);
}

/*
** COMMAND: reconstruct*
**
** Usage: %fossil reconstruct FILENAME DIRECTORY
**
  }
  db_create_repository(g.argv[2]);
  db_open_repository(g.argv[2]);

  /* We should be done with options.. */
  verify_all_options();

  db_open_config(0);
  db_begin_transaction();
  db_initial_setup(0, 0, 0);

  fossil_print("Reading files from directory \"%s\"...\n", g.argv[3]);
  recon_read_dir(g.argv[3]);
  fossil_print("\nBuilding the Fossil repository...\n");







  }
  db_create_repository(g.argv[2]);
  db_open_repository(g.argv[2]);

  /* We should be done with options.. */
  verify_all_options();

  db_open_config(0, 0);
  db_begin_transaction();
  db_initial_setup(0, 0, 0);

  fossil_print("Reading files from directory \"%s\"...\n", g.argv[3]);
  recon_read_dir(g.argv[3]);
  fossil_print("\nBuilding the Fossil repository...\n");

Changes to src/report.c.
**       t      Sort by text
**       n      Sort numerically
**       k      Sort by the data-sortkey property
**       x      This column is not sortable
**
** Capital letters mean sort in reverse order.
** If there are fewer characters in zColumnTypes[] than their are columns,
** the all extra columns assume type "t" (text).
**
** The third parameter is the column that was initially sorted (using 1-based
** column numbers, like SQL).  Make this value 0 if none of the columns are
** initially sorted.  Make the value negative if the column is initially sorted
** in reverse order.
**
** Clicking on the same column header twice in a row inverts the sort.
*/
void output_table_sorting_javascript(
  const char *zTableId,      /* ID of table to sort */
  const char *zColumnTypes,  /* String for column types */
  int iInitSort              /* Initially sorted column. Leftmost is 1. 0 for NONE */
){
  @ <script>
  @ function SortableTable(tableEl,columnTypes,initSort){
  @   this.tbody = tableEl.getElementsByTagName('tbody');
  @   this.columnTypes = columnTypes;


  @   this.sort = function (cell) {
  @     var column = cell.cellIndex;
  @     var sortFn;
  @     switch( cell.sortType ){

  @       case "N": case "n":  sortFn = this.sortNumeric;  break;




  @       case "T": case "t":  sortFn = this.sortText;     break;




  @       case "K": case "k":  sortFn = this.sortKey;      break;




  @       default:  return;
  @     }
  @     this.sortIndex = column;
  @     var newRows = new Array();
  @     for (j = 0; j < this.tbody[0].rows.length; j++) {
  @        newRows[j] = this.tbody[0].rows[j];
  @     }
  @     if( this.sortIndex==Math.abs(this.prevColumn)-1 ){
  @       newRows.reverse();
  @       this.prevColumn = -this.prevColumn;
  @     }else{
  @       newRows.sort(sortFn);
  @       this.prevColumn = this.sortIndex+1;
  @       if( cell.sortType>="A" && cell.sortType<="Z" ){
  @         newRows.reverse();
  @       }
  @     }
  @     for (i=0;i<newRows.length;i++) {
  @       this.tbody[0].appendChild(newRows[i]);
  @     }
  @     this.setHdrIcons();
  @   }
  @   this.setHdrIcons = function() {






**       t      Sort by text
**       n      Sort numerically
**       k      Sort by the data-sortkey property
**       x      This column is not sortable
**
** Capital letters mean sort in reverse order.
** If there are fewer characters in zColumnTypes[] than there are columns,
** then all extra columns assume type "t" (text).
**
** The third parameter is the column that was initially sorted (using 1-based
** column numbers, like SQL).  Make this value 0 if none of the columns are
** initially sorted.  Make the value negative if the column is initially sorted
** in reverse order.
**
** Clicking on the same column header twice in a row inverts the sort.
*/
void output_table_sorting_javascript(
  const char *zTableId,      /* ID of table to sort */
  const char *zColumnTypes,  /* String for column types */
  int iInitSort              /* Initially sorted column. Leftmost is 1. 0 for NONE */
){
  @ <script>
  @ function SortableTable(tableEl,columnTypes,initSort){
  @   this.tbody = tableEl.getElementsByTagName('tbody');
  @   this.columnTypes = columnTypes;
  @   var ncols = tableEl.rows[0].cells.length;
  @   for(var i = columnTypes.length; i<=ncols; i++){this.columnTypes += 't';}
  @   this.sort = function (cell) {
  @     var column = cell.cellIndex;
  @     var sortFn;
  @     switch( cell.sortType ){
  if( strchr(zColumnTypes,'n') ){
    @       case "n": sortFn = this.sortNumeric;  break;
  }
  if( strchr(zColumnTypes,'N') ){
    @       case "N": sortFn = this.sortReverseNumeric;  break;
  }
  @       case "t": sortFn = this.sortText;  break;
  if( strchr(zColumnTypes,'T') ){
    @       case "T": sortFn = this.sortReverseText;  break;
  }
  if( strchr(zColumnTypes,'k') ){
    @       case "k": sortFn = this.sortKey;  break;
  }
  if( strchr(zColumnTypes,'K') ){
    @       case "K": sortFn = this.sortReverseKey;  break;
  }
  @       default:  return;
  @     }
  @     this.sortIndex = column;
  @     var newRows = new Array();
  @     for (j = 0; j < this.tbody[0].rows.length; j++) {
  @        newRows[j] = this.tbody[0].rows[j];
  @     }
  @     if( this.sortIndex==Math.abs(this.prevColumn)-1 ){
  @       newRows.reverse();
  @       this.prevColumn = -this.prevColumn;
  @     }else{
  @       newRows.sort(sortFn);
  @       this.prevColumn = this.sortIndex+1;



  @     }
  @     for (i=0;i<newRows.length;i++) {
  @       this.tbody[0].appendChild(newRows[i]);
  @     }
  @     this.setHdrIcons();
  @   }
  @   this.setHdrIcons = function() {
  @       hdrCell.className = clsName;
  @     }
  @   }
  @   this.sortText = function(a,b) {
  @     var i = thisObject.sortIndex;
  @     aa = a.cells[i].textContent.replace(/^\W+/,'').toLowerCase();
  @     bb = b.cells[i].textContent.replace(/^\W+/,'').toLowerCase();

  @     if(aa==bb) return a.rowIndex-b.rowIndex;







  @     if(aa<bb) return -1;

  @     return 1;
  @   }


  @   this.sortNumeric = function(a,b) {
  @     var i = thisObject.sortIndex;
  @     aa = parseFloat(a.cells[i].textContent);
  @     if (isNaN(aa)) aa = 0;
  @     bb = parseFloat(b.cells[i].textContent);
  @     if (isNaN(bb)) bb = 0;
  @     if(aa==bb) return a.rowIndex-b.rowIndex;
  @     return aa-bb;
  @   }
  @   this.sortKey = function(a,b) {
  @     var i = thisObject.sortIndex;
  @     aa = a.cells[i].getAttribute("data-sortkey");
  @     bb = b.cells[i].getAttribute("data-sortkey");

  @     if(aa==bb) return a.rowIndex-b.rowIndex;








  @     if(aa<bb) return -1;

  @     return 1;
  @   }

  @   var x = tableEl.getElementsByTagName('thead');
  @   if(!(this.tbody && this.tbody[0].rows && this.tbody[0].rows.length>0)){
  @     return;
  @   }
  @   if(x && x[0].rows && x[0].rows.length > 0) {
  @     this.hdrRow = x[0].rows[0];
  @   } else {






  @       hdrCell.className = clsName;
  @     }
  @   }
  @   this.sortText = function(a,b) {
  @     var i = thisObject.sortIndex;
  @     aa = a.cells[i].textContent.replace(/^\W+/,'').toLowerCase();
  @     bb = b.cells[i].textContent.replace(/^\W+/,'').toLowerCase();
  @     if(aa<bb) return -1;
  @     if(aa==bb) return a.rowIndex-b.rowIndex;
  @     return 1;
  @   }
  if( strchr(zColumnTypes,'T') ){
    @   this.sortReverseText = function(a,b) {
    @     var i = thisObject.sortIndex;
    @     aa = a.cells[i].textContent.replace(/^\W+/,'').toLowerCase();
    @     bb = b.cells[i].textContent.replace(/^\W+/,'').toLowerCase();
    @     if(aa<bb) return +1;
    @     if(aa==bb) return a.rowIndex-b.rowIndex;
    @     return -1;
    @   }
  }
  if( strchr(zColumnTypes,'n') ){
    @   this.sortNumeric = function(a,b) {
    @     var i = thisObject.sortIndex;
    @     aa = parseFloat(a.cells[i].textContent);
    @     if (isNaN(aa)) aa = 0;
    @     bb = parseFloat(b.cells[i].textContent);
    @     if (isNaN(bb)) bb = 0;
    @     if(aa==bb) return a.rowIndex-b.rowIndex;
    @     return aa-bb;
    @   }
  }
  if( strchr(zColumnTypes,'N') ){
    @   this.sortReverseNumeric = function(a,b) {
    @     var i = thisObject.sortIndex;
    @     aa = parseFloat(a.cells[i].textContent);
    @     if (isNaN(aa)) aa = 0;
    @     bb = parseFloat(b.cells[i].textContent);
    @     if (isNaN(bb)) bb = 0;
    @     if(aa==bb) return a.rowIndex-b.rowIndex;
    @     return bb-aa;
    @   }
  }
  if( strchr(zColumnTypes,'k') ){
    @   this.sortKey = function(a,b) {
    @     var i = thisObject.sortIndex;
    @     aa = a.cells[i].getAttribute("data-sortkey");
    @     bb = b.cells[i].getAttribute("data-sortkey");
    @     if(aa<bb) return -1;
    @     if(aa==bb) return a.rowIndex-b.rowIndex;
    @     return 1;
    @   }
  }
  if( strchr(zColumnTypes,'K') ){
    @   this.sortReverseKey = function(a,b) {
    @     var i = thisObject.sortIndex;
    @     aa = a.cells[i].getAttribute("data-sortkey");
    @     bb = b.cells[i].getAttribute("data-sortkey");
    @     if(aa<bb) return +1;
    @     if(aa==bb) return a.rowIndex-b.rowIndex;
    @     return -1;
    @   }
  }
  @   var x = tableEl.getElementsByTagName('thead');
  @   if(!(this.tbody && this.tbody[0].rows && this.tbody[0].rows.length>0)){
  @     return;
  @   }
  @   if(x && x[0].rows && x[0].rows.length > 0) {
  @     this.hdrRow = x[0].rows[0];
  @   } else {
Changes to src/search.c.
  blob_reset(&pattern);
  search_sql_setup(g.db);

  db_multi_exec(
     "CREATE TEMP TABLE srch(rid,uuid,date,comment,x);"
     "CREATE INDEX srch_idx1 ON srch(x);"
     "INSERT INTO srch(rid,uuid,date,comment,x)"
     "   SELECT blob.rid, uuid, datetime(event.mtime%s),"
     "          coalesce(ecomment,comment),"
     "          search_score()"
     "     FROM event, blob"
     "    WHERE blob.rid=event.objid"
     "      AND search_match(coalesce(ecomment,comment));",
     timeline_utc()
  );
  iBest = db_int(0, "SELECT max(x) FROM srch");
  blob_append(&sql,
              "SELECT rid, uuid, date, comment, 0, 0 FROM srch "
              "WHERE 1 ", -1);
  if(!fAll){
    blob_append_sql(&sql,"AND x>%d ", iBest/3);






  blob_reset(&pattern);
  search_sql_setup(g.db);

  db_multi_exec(
     "CREATE TEMP TABLE srch(rid,uuid,date,comment,x);"
     "CREATE INDEX srch_idx1 ON srch(x);"
     "INSERT INTO srch(rid,uuid,date,comment,x)"
     "   SELECT blob.rid, uuid, datetime(event.mtime,toLocal()),"
     "          coalesce(ecomment,comment),"
     "          search_score()"
     "     FROM event, blob"
     "    WHERE blob.rid=event.objid"
     "      AND search_match(coalesce(ecomment,comment));"

  );
  iBest = db_int(0, "SELECT max(x) FROM srch");
  blob_append(&sql,
              "SELECT rid, uuid, date, comment, 0, 0 FROM srch "
              "WHERE 1 ", -1);
  if(!fAll){
    blob_append_sql(&sql,"AND x>%d ", iBest/3);
Changes to src/setup.c.
    "Show artifacts that are shunned by this repository");
  setup_menu_entry("Artifact Receipts Log", "rcvfromlist",
    "A record of received artifacts and their sources");
  setup_menu_entry("User Log", "access_log",
    "A record of login attempts");
  setup_menu_entry("Administrative Log", "admin_log",
    "View the admin_log entries");


  setup_menu_entry("Sitemap", "sitemap",
    "Links to miscellaneous pages");
  setup_menu_entry("SQL", "admin_sql",
    "Enter raw SQL commands");
  setup_menu_entry("TH1", "admin_th1",
    "Enter raw TH1 commands");
  @ </table>

  style_footer();
}

/*
** WEBPAGE: setup_ulist
**
** Show a list of users.  Clicking on any user jumps to the edit
** screen for that user.  Requires Admin privileges.
*/
void setup_ulist(void){
  Stmt s;
  int prevLevel = 0;

  login_check_credentials();
  if( !g.perm.Admin ){
    login_needed(0);
    return;
  }

  style_submenu_element("Add", "Add User", "setup_uedit");


  style_header("User List");
  @ <table class="usetupLayoutTable">
  @ <tr><td class="usetupColumnLayout">
  @ <span class="note">Users:</span>

  @ <table class="usetupUserList">
  prevLevel = 0;
  db_prepare(&s,
     "SELECT uid, login, cap, info, 1 FROM user"
     " WHERE login IN ('anonymous','nobody','developer','reader') "
     " UNION ALL "
     "SELECT uid, login, cap, info, 2 FROM user"
     " WHERE login NOT IN ('anonymous','nobody','developer','reader') "
     "ORDER BY 5, 2 COLLATE nocase"
  );
  while( db_step(&s)==SQLITE_ROW ){
    int iLevel = db_column_int(&s, 4);

    const char *zCap = db_column_text(&s, 2);






    const char *zLogin = db_column_text(&s, 1);



    if( iLevel>prevLevel ){



      if( prevLevel>0 ){
        @ <tr><td colspan="3"><hr></td></tr>
      }
      if( iLevel==1 ){
        @ <tr>
        @   <th class="usetupListUser"
        @    style="text-align: right;padding-right: 20px;">Category</th>
        @   <th class="usetupListCap"
        @    style="text-align: center;padding-right: 15px;">Capabilities</th>
        @   <th class="usetupListCon"
        @    style="text-align: left;">Notes</th>

        @ </tr>



      }else{

        @ <tr>
        @   <th class="usetupListUser"
        @    style="text-align: right;padding-right: 20px;">User&nbsp;ID</th>
        @   <th class="usetupListCap"

        @    style="text-align: center;padding-right: 15px;">Capabilities</th>
        @   <th class="usetupListCon"

        @    style="text-align: left;">Contact&nbsp;Info</th>
        @ </tr>
      }




      prevLevel = iLevel;
    }






    @ <tr>
    @ <td class="usetupListUser"
    @     style="text-align: right;padding-right: 20px;white-space:nowrap;">
    if( g.perm.Admin && (zCap[0]!='s' || g.perm.Setup) ){
      @ <a href="setup_uedit?id=%d(db_column_int(&s,0))">
    }
    @ %h(zLogin)

    if( g.perm.Admin ){

      @ </a>
    }
    @ </td>
    @ <td class="usetupListCap" style="text-align: center;padding-right: 15px;">%s(zCap)</td>
    @ <td  class="usetupListCon"  style="text-align: left;">%h(db_column_text(&s,3))</td>
    @ </tr>

  }
  @ </table>

  @ </td><td class="usetupColumnLayout">







  @ <span class="note">Notes:</span>
  @ <ol>
  @ <li><p>The permission flags are as follows:</p>
  @ <table>
     @ <tr><th valign="top">a</th>
     @   <td><i>Admin:</i> Create and delete users</td></tr>
     @ <tr><th valign="top">b</th>
     @   <td><i>Attach:</i> Add attachments to wiki or tickets</td></tr>






    "Show artifacts that are shunned by this repository");
  setup_menu_entry("Artifact Receipts Log", "rcvfromlist",
    "A record of received artifacts and their sources");
  setup_menu_entry("User Log", "access_log",
    "A record of login attempts");
  setup_menu_entry("Administrative Log", "admin_log",
    "View the admin_log entries");
  setup_menu_entry("Stats", "stat",
    "Repository Status Reports");
  setup_menu_entry("Sitemap", "sitemap",
    "Links to miscellaneous pages");
  setup_menu_entry("SQL", "admin_sql",
    "Enter raw SQL commands");
  setup_menu_entry("TH1", "admin_th1",
    "Enter raw TH1 commands");
  @ </table>

  style_footer();
}

/*
** WEBPAGE: setup_ulist
**
** Show a list of users.  Clicking on any user jumps to the edit
** screen for that user.  Requires Admin privileges.
*/
void setup_ulist(void){
  Stmt s;


  login_check_credentials();
  if( !g.perm.Admin ){
    login_needed(0);
    return;
  }

  style_submenu_element("Add", "Add User", "setup_uedit");
  style_submenu_element("Log", "Access Log", "access_log");
  style_submenu_element("Help", "Help", "setup_ulist_notes");
  style_header("User List");
  @ <table border=1 cellpadding=2 cellspacing=0 class='userTable'>


  @ <thead><tr><th>UID <th>Category <th>Capabilities <th>Info <th>Last Change</tr></thead>
  @ <tbody>

  db_prepare(&s,
     "SELECT uid, login, cap, date(mtime,'unixepoch')"


     "  FROM user"
     " WHERE login IN ('anonymous','nobody','developer','reader')"
     " ORDER BY login"
  );
  while( db_step(&s)==SQLITE_ROW ){
    int uid = db_column_int(&s, 0);
    const char *zLogin = db_column_text(&s, 1);
    const char *zCap = db_column_text(&s, 2);
    const char *zDate = db_column_text(&s, 4);
    @ <tr>
    @ <td><a href='setup_uedit?id=%d(uid)'>%d(uid)</a>
    @ <td><a href='setup_uedit?id=%d(uid)'>%h(zLogin)</a>
    @ <td>%h(zCap)
    
    if( fossil_strcmp(zLogin,"anonymous")==0 ){
      @ <td>All logged-in users
    }else if( fossil_strcmp(zLogin,"developer")==0 ){
      @ <td>Users with '<b>v</b>' capability
    }else if( fossil_strcmp(zLogin,"nobody")==0 ){
      @ <td>All users without login
    }else if( fossil_strcmp(zLogin,"reader")==0 ){
      @ <td>Users with '<b>u</b>' capability
    }else{
      @ <td>
    }
    if( zDate && zDate[0] ){
      @ <td>%h(zDate)


    }else{

      @ <td>

    }
    @ </tr>
  }
  db_finalize(&s);
  @ </tbody></table>
  @ <div class='section'>Users</div>
  @ <table border=1 cellpadding=2 cellspacing=0 class='userTable' id='userlist'>
  @ <thead><tr>
  @ <th>ID<th>Login<th>Caps<th>Info<th>Date<th>Expire</tr></thead>

  @ <tbody>
  db_prepare(&s,
     "SELECT uid, login, cap, info, date(mtime,'unixepoch'), lower(login) AS sortkey, "
     "       CASE WHEN info LIKE '%%expires 20%%'"
             "    THEN substr(info,instr(lower(info),'expires')+8,10)"
             "    END AS exp"
     "  FROM user"

     " WHERE login NOT IN ('anonymous','nobody','developer','reader')"
     " ORDER BY sortkey"
  );
  while( db_step(&s)==SQLITE_ROW ){
    int uid = db_column_int(&s, 0);

    const char *zLogin = db_column_text(&s, 1);
    const char *zCap = db_column_text(&s, 2);
    const char *zInfo = db_column_text(&s, 3);
    const char *zDate = db_column_text(&s, 4);
    const char *zSortKey = db_column_text(&s,5);
    const char *zExp = db_column_text(&s,6);
    @ <tr>
    @ <td><a href='setup_uedit?id=%d(uid)'>%d(uid)</a>


    @ <td data-sortkey='%h(zSortKey)'><a href='setup_uedit?id=%d(uid)'>%h(zLogin)</a>

    @ <td>%h(zCap)
    @ <td>%h(zInfo)
    @ <td>%h(zDate?zDate:"")
    @ <td>%h(zExp?zExp:"")
    @ </tr>
  }
  @ </tbody></table>

  db_finalize(&s);
  output_table_sorting_javascript("userlist","nktxTT",2);
  style_footer();
}

/*
** WEBPAGE: setup_ulist_notes
**
** A documentation page showing notes about user configuration.  This information
** used to be a side-bar on the user list page, but has been factored out for
** improved presentation.
*/
void setup_ulist_notes(void){
  style_header("User Configuration Notes");
  @ <h1>User Configuration Notes:</h1>
  @ <ol>
  @ <li><p>The permission flags are as follows:</p>
  @ <table>
     @ <tr><th valign="top">a</th>
     @   <td><i>Admin:</i> Create and delete users</td></tr>
     @ <tr><th valign="top">b</th>
     @   <td><i>Attach:</i> Add attachments to wiki or tickets</td></tr>
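
The user list query in setup_ulist() above derives an expiration date from the free-form info column: instr(lower(info),'expires')+8 skips past the word "expires" and the space that follows it, and substr(...,10) keeps the ten characters of an ISO date such as 2016-12-31. The standalone C sketch below mirrors that extraction for illustration only; the helper names are hypothetical, and Fossil itself does this work in the SQL.

#include <stdio.h>
#include <string.h>
#include <ctype.h>

/* Case-insensitive test for the prefix "expires " at z (illustrative). */
static int is_expires_marker(const char *z){
  static const char zKey[] = "expires ";
  int i;
  for(i=0; zKey[i]; i++){
    if( tolower((unsigned char)z[i])!=zKey[i] ) return 0;
  }
  return 1;
}

/* Copy the ten characters following "expires " into zOut (>=11 bytes). */
static int extract_expiry(const char *zInfo, char *zOut){
  int i;
  for(i=0; zInfo[i]; i++){
    if( is_expires_marker(&zInfo[i]) && strlen(&zInfo[i]+8)>=10 ){
      memcpy(zOut, &zInfo[i]+8, 10);
      zOut[10] = 0;
      return 1;
    }
  }
  return 0;
}

int main(void){
  char zDate[11];
  if( extract_expiry("contact info; expires 2016-12-31", zDate) ){
    printf("%s\n", zDate);    /* prints 2016-12-31 */
  }
  return 0;
}
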
  @ Users with privilege <span class="capability">v</span> inherit the combined
  @ privileges of <span class="usertype">developer</span>,
  @ <span class="usertype">anonymous</span>, and
  @ <span class="usertype">nobody</span>.
  @ </p></li>
  @
  @ </ol>
  @ </td></tr></table>
  style_footer();
  db_finalize(&s);
}


/*
** Return true if zPw is a valid password string.  A valid
** password string is:
**
**  (1)  A zero-length string, or
**  (2)  a string that contains a character other than '*'.

  @ Users with privilege <span class="capability">v</span> inherit the combined
  @ privileges of <span class="usertype">developer</span>,
  @ <span class="usertype">anonymous</span>, and
  @ <span class="usertype">nobody</span>.
  @ </p></li>
  @
  @ </ol>

  style_footer();

}


/*
** Return true if zPw is a valid password string.  A valid
** password string is:
**
**  (1)  A zero-length string, or
**  (2)  a string that contains a character other than '*'.
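
The rule stated in the comment above is compact enough to show directly. The sketch below is only an illustration of that rule, not necessarily the body Fossil uses for this function.

#include <stdio.h>

/* A password string is "valid" if it is empty or contains at least one
** character other than '*' (illustration of the rule described above). */
static int password_is_valid(const char *zPw){
  int i;
  if( zPw[0]==0 ) return 1;            /* rule (1): zero-length string */
  for(i=0; zPw[i]; i++){
    if( zPw[i]!='*' ) return 1;        /* rule (2): any non-'*' character */
  }
  return 0;                            /* all '*' characters */
}

int main(void){
  printf("%d %d %d\n",
         password_is_valid(""),        /* 1 */
         password_is_valid("****"),    /* 0 */
         password_is_valid("*a*"));    /* 1 */
  return 0;
}
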
  const char *zId, *zLogin, *zInfo, *zCap, *zPw;
  const char *zGroup;
  const char *zOldLogin;
  int doWrite;
  int uid, i;
  int higherUser = 0;  /* True if user being edited is SETUP and the */
                       /* user doing the editing is ADMIN.  Disallow editing */
  char *inherit[128];
  int a[128];
  const char *oa[128];

  /* Must have ADMIN privileges to access this page
  */
  login_check_credentials();
  if( !g.perm.Admin ){ login_needed(0); return; }

  const char *zId, *zLogin, *zInfo, *zCap, *zPw;
  const char *zGroup;
  const char *zOldLogin;
  int doWrite;
  int uid, i;
  int higherUser = 0;  /* True if user being edited is SETUP and the */
                       /* user doing the editing is ADMIN.  Disallow editing */
  const char *inherit[128];
  int a[128];
  const char *oa[128];

  /* Must have ADMIN privileges to access this page
  */
  login_check_credentials();
  if( !g.perm.Admin ){ login_needed(0); return; }
** Generate an entry box for an attribute.
*/
void entry_attribute(
  const char *zLabel,   /* The text label on the entry box */
  int width,            /* Width of the entry box */
  const char *zVar,     /* The corresponding row in the VAR table */
  const char *zQParm,   /* The query parameter */
  char *zDflt,          /* Default value if VAR table entry does not exist */
  int disabled          /* 1 if disabled */
){
  const char *zVal = db_get(zVar, zDflt);
  const char *zQ = P(zQParm);
  if( zQ && fossil_strcmp(zQ,zVal)!=0 ){
    const int nZQ = (int)strlen(zQ);
    login_verify_csrf_secret();

** Generate an entry box for an attribute.
*/
void entry_attribute(
  const char *zLabel,   /* The text label on the entry box */
  int width,            /* Width of the entry box */
  const char *zVar,     /* The corresponding row in the VAR table */
  const char *zQParm,   /* The query parameter */
  const char *zDflt,    /* Default value if VAR table entry does not exist */
  int disabled          /* 1 if disabled */
){
  const char *zVal = db_get(zVar, zDflt);
  const char *zQ = P(zQParm);
  if( zQ && fossil_strcmp(zQ,zVal)!=0 ){
    const int nZQ = (int)strlen(zQ);
    login_verify_csrf_secret();
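
This hunk, together with the textarea_attribute() and multiple_choice_attribute() hunks below, makes the default-value parameter const char*, so string literals and other read-only defaults flow through to db_get() without casts such as (char*)zDflt. A toy illustration of the difference follows; the function names are made up and are not Fossil code.

#include <stdio.h>

/* A string literal binds cleanly to a const char* parameter, while passing
** one through a plain char* invites warnings (gcc -Wwrite-strings, or any
** C++ build) and tempts callers into casts. */
static void take_default_old(char *zDflt){ printf("%s\n", zDflt); }
static void take_default_new(const char *zDflt){ printf("%s\n", zDflt); }

int main(void){
  take_default_new("0");           /* fine with the new signatures        */
  take_default_old((char*)"0");    /* the kind of cast the change removes */
  return 0;
}
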
  int rows,             /* Rows in the textarea */
  int cols,             /* Columns in the textarea */
  const char *zVar,     /* The corresponding row in the VAR table */
  const char *zQP,      /* The query parameter */
  const char *zDflt,    /* Default value if VAR table entry does not exist */
  int disabled          /* 1 if the textarea should  not be editable */
){
  const char *z = db_get(zVar, (char*)zDflt);
  const char *zQ = P(zQP);
  if( zQ && !disabled && fossil_strcmp(zQ,z)!=0){
    const int nZQ = (int)strlen(zQ);
    login_verify_csrf_secret();
    db_set(zVar, zQ, 0);
    admin_log("Set textarea_attribute %Q to: %.*s%s",
              zVar, 20, zQ, (nZQ>20 ? "..." : ""));

  int rows,             /* Rows in the textarea */
  int cols,             /* Columns in the textarea */
  const char *zVar,     /* The corresponding row in the VAR table */
  const char *zQP,      /* The query parameter */
  const char *zDflt,    /* Default value if VAR table entry does not exist */
  int disabled          /* 1 if the textarea should  not be editable */
){
  const char *z = db_get(zVar, zDflt);
  const char *zQ = P(zQP);
  if( zQ && !disabled && fossil_strcmp(zQ,z)!=0){
    const int nZQ = (int)strlen(zQ);
    login_verify_csrf_secret();
    db_set(zVar, zQ, 0);
    admin_log("Set textarea_attribute %Q to: %.*s%s",
              zVar, 20, zQ, (nZQ>20 ? "..." : ""));
  const char *zLabel,   /* The text label on the menu */
  const char *zVar,     /* The corresponding row in the VAR table */
  const char *zQP,      /* The query parameter */
  const char *zDflt,    /* Default value if VAR table entry does not exist */
  int nChoice,          /* Number of choices */
  const char *const *azChoice /* Choices. 2 per choice: (VAR value, Display) */
){
  const char *z = db_get(zVar, (char*)zDflt);
  const char *zQ = P(zQP);
  int i;
  if( zQ && fossil_strcmp(zQ,z)!=0){
    const int nZQ = (int)strlen(zQ);
    login_verify_csrf_secret();
    db_set(zVar, zQ, 0);
    admin_log("Set multiple_choice_attribute %Q to: %.*s%s",

  const char *zLabel,   /* The text label on the menu */
  const char *zVar,     /* The corresponding row in the VAR table */
  const char *zQP,      /* The query parameter */
  const char *zDflt,    /* Default value if VAR table entry does not exist */
  int nChoice,          /* Number of choices */
  const char *const *azChoice /* Choices. 2 per choice: (VAR value, Display) */
){
  const char *z = db_get(zVar, zDflt);
  const char *zQ = P(zQP);
  int i;
  if( zQ && fossil_strcmp(zQ,z)!=0){
    const int nZQ = (int)strlen(zQ);
    login_verify_csrf_secret();
    db_set(zVar, zQ, 0);
    admin_log("Set multiple_choice_attribute %Q to: %.*s%s",
  @ <hr />
  onoff_attribute("Plaintext comments on timelines",
                  "timeline-plaintext", "tpt", 0, 0);
  @ <p>In timeline displays, check-in comments are displayed literally,
  @ without any wiki or HTML interpretation.  (Note: Use CSS to change
  @ display formatting features such as fonts and line-wrapping behavior.)</p>







  @ <hr />
  onoff_attribute("Use Universal Coordinated Time (UTC)",
                  "timeline-utc", "utc", 1, 0);
  @ <p>Show times as UTC (also sometimes called Greenwich Mean Time (GMT) or
  @ Zulu) instead of in local time.  On this server, local time is currently
  tmDiff = db_double(0.0, "SELECT julianday('now')");
  tmDiff = db_double(0.0,

  @ <hr />
  onoff_attribute("Plaintext comments on timelines",
                  "timeline-plaintext", "tpt", 0, 0);
  @ <p>In timeline displays, check-in comments are displayed literally,
  @ without any wiki or HTML interpretation.  (Note: Use CSS to change
  @ display formatting features such as fonts and line-wrapping behavior.)</p>

  @ <hr />
  onoff_attribute("Truncate comment at first blank line",
                  "timeline-truncate-at-blank", "ttb", 0, 0);
  @ <p>In timeline displays, check-in comments are displayed only through
  @ the first blank line.</p>

  @ <hr />
  onoff_attribute("Use Universal Coordinated Time (UTC)",
                  "timeline-utc", "utc", 1, 0);
  @ <p>Show times as UTC (also sometimes called Greenwich Mean Time (GMT) or
  @ Zulu) instead of in local time.  On this server, local time is currently
  tmDiff = db_double(0.0, "SELECT julianday('now')");
  tmDiff = db_double(0.0,
  const char *zQ = P("q");
  int go = P("go")!=0;
  login_check_credentials();
  if( !g.perm.Setup ){
    login_needed(0);
    return;
  }

  db_begin_transaction();
  style_header("Raw SQL Commands");
  @ <p><b>Caution:</b> There are no restrictions on the SQL that can be
  @ run by this page.  You can do serious and irreparable damage to the
  @ repository.  Proceed with extreme caution.</p>
  @
  @ <p>Only a the first statement in the entry box will be run.
  @ Any subsequent statements will be silently ignored.</p>
  @
  @ <p>Database names:<ul><li>repository &rarr; %s(db_name("repository"))
  if( g.zConfigDbName ){
    @ <li>config &rarr; %s(db_name("configdb"))
  }
  if( g.localOpen ){

  const char *zQ = P("q");
  int go = P("go")!=0;
  login_check_credentials();
  if( !g.perm.Setup ){
    login_needed(0);
    return;
  }
  add_content_sql_commands(g.db);
  db_begin_transaction();
  style_header("Raw SQL Commands");
  @ <p><b>Caution:</b> There are no restrictions on the SQL that can be
  @ run by this page.  You can do serious and irreparable damage to the
  @ repository.  Proceed with extreme caution.</p>
  @
  @ <p>Only the first statement in the entry box will be run.
  @ Any subsequent statements will be silently ignored.</p>
  @
  @ <p>Database names:<ul><li>repository &rarr; %s(db_name("repository"))
  if( g.zConfigDbName ){
    @ <li>config &rarr; %s(db_name("configdb"))
  }
  if( g.localOpen ){
    login_needed(0);
    return;
  }
  style_header("Admin Log");
  create_admin_log_table();
  limit = atoi(PD("n","20"));
  fLogEnabled = db_get_boolean("admin-log", 0);
  @ <div>Admin logging is %s(fLogEnabled?"on":"off").</div>



  @ <div>Limit results to: <span>
  admin_log_render_limits();
  @ </span></div>

  blob_append_sql(&qLog,

    login_needed(0);
    return;
  }
  style_header("Admin Log");
  create_admin_log_table();
  limit = atoi(PD("n","20"));
  fLogEnabled = db_get_boolean("admin-log", 0);
  @ <div>Admin logging is %s(fLogEnabled?"on":"off").
  @ (Change this on the <a href="setup_settings">settings</a> page.)</div>


  @ <div>Limit results to: <span>
  admin_log_render_limits();
  @ </span></div>

  blob_append_sql(&qLog,
Changes to src/shell.c.
static int enableTimer = 0;

/* Return the current wall-clock time */
static sqlite3_int64 timeOfDay(void){
  static sqlite3_vfs *clockVfs = 0;
  sqlite3_int64 t;
  if( clockVfs==0 ) clockVfs = sqlite3_vfs_find(0);
  if( clockVfs->iVersion>=1 && clockVfs->xCurrentTimeInt64!=0 ){
    clockVfs->xCurrentTimeInt64(clockVfs, &t);
  }else{
    double r;
    clockVfs->xCurrentTime(clockVfs, &r);
    t = (sqlite3_int64)(r*86400000.0);
  }
  return t;

static int enableTimer = 0;

/* Return the current wall-clock time */
static sqlite3_int64 timeOfDay(void){
  static sqlite3_vfs *clockVfs = 0;
  sqlite3_int64 t;
  if( clockVfs==0 ) clockVfs = sqlite3_vfs_find(0);
  if( clockVfs->iVersion>=2 && clockVfs->xCurrentTimeInt64!=0 ){
    clockVfs->xCurrentTimeInt64(clockVfs, &t);
  }else{
    double r;
    clockVfs->xCurrentTime(clockVfs, &r);
    t = (sqlite3_int64)(r*86400000.0);
  }
  return t;
/*
** Treat stdin as an interactive input if the following variable
** is true.  Otherwise, assume stdin is connected to a file or pipe.
*/
static int stdin_is_interactive = 1;

/*
** The following is the open SQLite database.  We make a pointer
** to this database a static variable so that it can be accessed
** by the SIGINT handler to interrupt database processing.
*/
static sqlite3 *globalDb = 0;

/*
** Treat stdin as an interactive input if the following variable
** is true.  Otherwise, assume stdin is connected to a file or pipe.
*/
static int stdin_is_interactive = 1;

/*
** On Windows systems we have to know if standard output is a console
** in order to translate UTF-8 into MBCS.  The following variable is
** true if translation is required.
*/
static int stdout_is_console = 1;

/*
** The following is the open SQLite database.  We make a pointer
** to this database a static variable so that it can be accessed
** by the SIGINT handler to interrupt database processing.
*/
static sqlite3 *globalDb = 0;

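The new stdout_is_console flag defaults to 1, and the code that clears it lies outside this excerpt. One plausible way to initialize such a flag, shown here only as an assumption and not taken from this diff, is to test whether standard output is an interactive terminal:

#include <stdio.h>
#if defined(_WIN32) || defined(WIN32)
#  include <io.h>
#else
#  include <unistd.h>
#endif

static int stdout_is_console = 1;

/* Hypothetical initialization: mark stdout as a console only when it is an
** interactive terminal, so UTF-8 to MBCS translation can be skipped for
** output that is redirected to a file or pipe. */
static void detect_console(void){
#if defined(_WIN32) || defined(WIN32)
  stdout_is_console = _isatty(_fileno(stdout));
#else
  stdout_is_console = isatty(fileno(stdout));
#endif
}

int main(void){
  detect_console();
  printf("stdout_is_console = %d\n", stdout_is_console);
  return 0;
}
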
  assert( 0==argc );
  assert( zShellStatic );
  UNUSED_PARAMETER(argc);
  UNUSED_PARAMETER(argv);
  sqlite3_result_text(context, zShellStatic, -1, SQLITE_STATIC);
}

/*
** This routine reads a line of text from FILE in, stores
** the text in memory obtained from malloc() and returns a pointer
** to the text.  NULL is returned at end of file, or if malloc()
** fails.
**

  assert( 0==argc );
  assert( zShellStatic );
  UNUSED_PARAMETER(argc);
  UNUSED_PARAMETER(argv);
  sqlite3_result_text(context, zShellStatic, -1, SQLITE_STATIC);
}


/*
** Compute a string length that is limited to what can be stored in
** lower 30 bits of a 32-bit signed integer.
*/
static int strlen30(const char *z){
  const char *z2 = z;
  while( *z2 ){ z2++; }
  return 0x3fffffff & (int)(z2 - z);
}

/*
** This routine reads a line of text from FILE in, stores
** the text in memory obtained from malloc() and returns a pointer
** to the text.  NULL is returned at end of file, or if malloc()
** fails.
**
    if( n>0 && zLine[n-1]=='\n' ){
      n--;
      if( n>0 && zLine[n-1]=='\r' ) n--;
      zLine[n] = 0;
      break;
    }
  }
  return zLine;
}

/*
** Retrieve a single line of input text.
**
** If in==0 then read from standard input and prompt before each line.

    if( n>0 && zLine[n-1]=='\n' ){
      n--;
      if( n>0 && zLine[n-1]=='\r' ) n--;
      zLine[n] = 0;
      break;
    }
  }
#if defined(_WIN32) || defined(WIN32)
  /* For interactive input on Windows systems, translate the 
  ** multi-byte characterset characters into UTF-8. */
  if( stdin_is_interactive ){
    extern char *sqlite3_win32_mbcs_to_utf8(const char*);
    char *zTrans = sqlite3_win32_mbcs_to_utf8(zLine);
    if( zTrans ){
      int nTrans = strlen30(zTrans)+1;
      if( nTrans>nLine ){
        zLine = realloc(zLine, nTrans);
        if( zLine==0 ){
          sqlite3_free(zTrans);
          return 0;
        }
      }
      memcpy(zLine, zTrans, nTrans);
      sqlite3_free(zTrans);
    }
  }
#endif /* defined(_WIN32) || defined(WIN32) */
  return zLine;
}

/*
** Retrieve a single line of input text.
**
** If in==0 then read from standard input and prompt before each line.
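
The block added above hands interactive console input to sqlite3_win32_mbcs_to_utf8() so the rest of the shell can assume UTF-8. For reference, the sketch below shows one conventional way such a conversion is done on Windows, widening from the active code page to UTF-16 and then narrowing to UTF-8; it is an illustration under that assumption, not the body of the SQLite helper.

#if defined(_WIN32) || defined(WIN32)
#include <windows.h>
#include <stdlib.h>

/* Illustrative MBCS -> UTF-8 conversion (not the SQLite implementation).
** Returns a malloc()ed UTF-8 copy of zMbcs, or NULL on error; the caller
** frees the result. */
static char *mbcs_to_utf8_sketch(const char *zMbcs){
  int nWide, nUtf8;
  WCHAR *zWide;
  char *zUtf8 = NULL;
  nWide = MultiByteToWideChar(CP_ACP, 0, zMbcs, -1, NULL, 0);
  if( nWide==0 ) return NULL;
  zWide = malloc(nWide*sizeof(WCHAR));
  if( zWide==0 ) return NULL;
  MultiByteToWideChar(CP_ACP, 0, zMbcs, -1, zWide, nWide);
  nUtf8 = WideCharToMultiByte(CP_UTF8, 0, zWide, -1, NULL, 0, NULL, NULL);
  if( nUtf8>0 ){
    zUtf8 = malloc(nUtf8);
    if( zUtf8 ){
      WideCharToMultiByte(CP_UTF8, 0, zWide, -1, zUtf8, nUtf8, NULL, NULL);
    }
  }
  free(zWide);
  return zUtf8;
}
#endif /* defined(_WIN32) || defined(WIN32) */
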
    zResult = shell_readline(zPrompt);
    if( zResult && *zResult ) shell_add_history(zResult);
#endif
  }
  return zResult;
}

/*
** Shell output mode information from before ".explain on", 
** saved so that it can be restored by ".explain off"
*/
typedef struct SavedModeInfo SavedModeInfo;
struct SavedModeInfo {
  int valid;          /* Is there legit data in here? */
  int mode;           /* Mode prior to ".explain on" */
  int showHeader;     /* The ".header" setting prior to ".explain on" */
  int colWidth[100];  /* Column widths prior to ".explain on" */
};

/*
** State information about the database connection is contained in an
** instance of the following structure.
*/
typedef struct ShellState ShellState;
struct ShellState {
  sqlite3 *db;           /* The database */
  int echoOn;            /* True to echo input commands */

  int autoEQP;           /* Run EXPLAIN QUERY PLAN prior to each SQL stmt */
  int statsOn;           /* True to display memory stats before each finalize */
  int scanstatsOn;       /* True to display scan stats before each finalize */

  int backslashOn;       /* Resolve C-style \x escapes in SQL input text */
  int outCount;          /* Revert to stdout when reaching zero */
  int cnt;               /* Number of records displayed so far */
  FILE *out;             /* Write results here */
  FILE *traceOut;        /* Output for sqlite3_trace() */
  int nErr;              /* Number of errors seen */
  int mode;              /* An output mode setting */


  int writableSchema;    /* True if PRAGMA writable_schema=ON */
  int showHeader;        /* True to show column names in List or Column mode */
  unsigned shellFlgs;    /* Various flags */
  char *zDestTable;      /* Name of destination table when MODE_Insert */
  char colSeparator[20]; /* Column separator character for several modes */
  char rowSeparator[20]; /* Row separator character for MODE_Ascii */
  int colWidth[100];     /* Requested width of each column when in column mode*/
  int actualWidth[100];  /* Actual width of each column */
  char nullValue[20];    /* The text to print when a NULL comes back from
                         ** the database */
  SavedModeInfo normalMode;/* Holds the mode just before .explain ON */
  char outfile[FILENAME_MAX]; /* Filename for *out */
  const char *zDbFilename;    /* name of the database file */
  char *zFreeOnClose;         /* Filename to free when closing */
  const char *zVfs;           /* Name of VFS to use */
  sqlite3_stmt *pStmt;   /* Current statement if any. */
  FILE *pLog;            /* Write log output here */
  int *aiIndent;         /* Array of indents used in MODE_Explain */

    zResult = shell_readline(zPrompt);
    if( zResult && *zResult ) shell_add_history(zResult);
#endif
  }
  return zResult;
}

/*
** Render output like fprintf().  Except, if the output is going to the
** console and if this is running on a Windows machine, translate the
** output from UTF-8 into MBCS.
*/
#if defined(_WIN32) || defined(WIN32)
void utf8_printf(FILE *out, const char *zFormat, ...){
  va_list ap;
  va_start(ap, zFormat);
  if( stdout_is_console && (out==stdout || out==stderr) ){
    extern char *sqlite3_win32_utf8_to_mbcs(const char*);
    char *z1 = sqlite3_vmprintf(zFormat, ap);
    char *z2 = sqlite3_win32_utf8_to_mbcs(z1);
    sqlite3_free(z1);
    fputs(z2, out);
    sqlite3_free(z2);
  }else{
    vfprintf(out, zFormat, ap);
  }
  va_end(ap);
}
#elif !defined(utf8_printf)
# define utf8_printf fprintf
#endif

/*
** Render output like fprintf().  This should not be used on anything that
** includes string formatting (e.g. "%s").
*/
#if !defined(raw_printf)
# define raw_printf fprintf
#endif

/*
** Shell output mode information from before ".explain on", 
** saved so that it can be restored by ".explain off"
*/
typedef struct SavedModeInfo SavedModeInfo;
struct SavedModeInfo {
  int valid;          /* Is there legit data in here? */
  int mode;           /* Mode prior to ".explain on" */
  int showHeader;     /* The ".header" setting prior to ".explain on" */
  int colWidth[100];  /* Column widths prior to ".explain on" */
};

/*
** State information about the database connection is contained in an
** instance of the following structure.
*/
typedef struct ShellState ShellState;
struct ShellState {
  sqlite3 *db;           /* The database */
  int echoOn;            /* True to echo input commands */
  int autoExplain;       /* Automatically turn on .explain mode */
  int autoEQP;           /* Run EXPLAIN QUERY PLAN prior to each SQL stmt */
  int statsOn;           /* True to display memory stats before each finalize */
  int scanstatsOn;       /* True to display scan stats before each finalize */
  int countChanges;      /* True to display change counts */
  int backslashOn;       /* Resolve C-style \x escapes in SQL input text */
  int outCount;          /* Revert to stdout when reaching zero */
  int cnt;               /* Number of records displayed so far */
  FILE *out;             /* Write results here */
  FILE *traceOut;        /* Output for sqlite3_trace() */
  int nErr;              /* Number of errors seen */
  int mode;              /* An output mode setting */
  int cMode;             /* temporary output mode for the current query */
  int normalMode;        /* Output mode before ".explain on" */
  int writableSchema;    /* True if PRAGMA writable_schema=ON */
  int showHeader;        /* True to show column names in List or Column mode */
  unsigned shellFlgs;    /* Various flags */
  char *zDestTable;      /* Name of destination table when MODE_Insert */
  char colSeparator[20]; /* Column separator character for several modes */
  char rowSeparator[20]; /* Row separator character for MODE_Ascii */
  int colWidth[100];     /* Requested width of each column when in column mode*/
  int actualWidth[100];  /* Actual width of each column */
  char nullValue[20];    /* The text to print when a NULL comes back from
                         ** the database */

  char outfile[FILENAME_MAX]; /* Filename for *out */
  const char *zDbFilename;    /* name of the database file */
  char *zFreeOnClose;         /* Filename to free when closing */
  const char *zVfs;           /* Name of VFS to use */
  sqlite3_stmt *pStmt;   /* Current statement if any. */
  FILE *pLog;            /* Write log output here */
  int *aiIndent;         /* Array of indents used in MODE_Explain */
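
The two wrappers defined above split the shell's output into two paths: utf8_printf() for text that may contain UTF-8 from the database or the user, which a Windows console build re-encodes to the active code page, and raw_printf() for fixed ASCII such as separators and markup. On other platforms both collapse to fprintf(), which the minimal standalone sketch below relies on; it mimics the #define fallbacks from the hunk rather than reproducing the Windows path.

#include <stdio.h>

#if !defined(utf8_printf)
# define utf8_printf fprintf   /* non-Windows fallback, as in the hunk */
#endif
#if !defined(raw_printf)
# define raw_printf fprintf    /* fixed-ASCII output, never translated */
#endif

int main(void){
  const char *zValue = "caf\xc3\xa9";   /* UTF-8 text, e.g. from the database */
  raw_printf(stdout, "<TD>");           /* fixed ASCII markup                 */
  utf8_printf(stdout, "%s", zValue);    /* may need MBCS translation on Win32 */
  raw_printf(stdout, "</TD>\n");
  return 0;
}
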
#define SEP_Record    "\x1E"

/*
** Number of elements in an array
*/
#define ArraySize(X)  (int)(sizeof(X)/sizeof(X[0]))

/*
** Compute a string length that is limited to what can be stored in
** lower 30 bits of a 32-bit signed integer.
*/
static int strlen30(const char *z){
  const char *z2 = z;
  while( *z2 ){ z2++; }
  return 0x3fffffff & (int)(z2 - z);
}

/*
** A callback for the sqlite3_log() interface.
*/
static void shellLog(void *pArg, int iErrCode, const char *zMsg){
  ShellState *p = (ShellState*)pArg;
  if( p->pLog==0 ) return;
  fprintf(p->pLog, "(%d) %s\n", iErrCode, zMsg);
  fflush(p->pLog);
}

/*
** Output the given string as a hex-encoded blob (eg. X'1234' )
*/
static void output_hex_blob(FILE *out, const void *pBlob, int nBlob){
  int i;
  char *zBlob = (char *)pBlob;
  fprintf(out,"X'");
  for(i=0; i<nBlob; i++){ fprintf(out,"%02x",zBlob[i]&0xff); }
  fprintf(out,"'");
}

/*
** Output the given string as a quoted string using SQL quoting conventions.
*/
static void output_quoted_string(FILE *out, const char *z){
  int i;
  int nSingle = 0;
  setBinaryMode(out);
  for(i=0; z[i]; i++){
    if( z[i]=='\'' ) nSingle++;
  }
  if( nSingle==0 ){
    fprintf(out,"'%s'",z);
  }else{
    fprintf(out,"'");
    while( *z ){
      for(i=0; z[i] && z[i]!='\''; i++){}
      if( i==0 ){
        fprintf(out,"''");
        z++;
      }else if( z[i]=='\'' ){
        fprintf(out,"%.*s''",i,z);
        z += i+1;
      }else{
        fprintf(out,"%s",z);
        break;
      }
    }
    fprintf(out,"'");
  }
  setTextMode(out);
}

/*
** Output the given string as a quoted string according to C or TCL quoting rules.
*/

#define SEP_Record    "\x1E"

/*
** Number of elements in an array
*/
#define ArraySize(X)  (int)(sizeof(X)/sizeof(X[0]))

/*
** A callback for the sqlite3_log() interface.
*/
static void shellLog(void *pArg, int iErrCode, const char *zMsg){
  ShellState *p = (ShellState*)pArg;
  if( p->pLog==0 ) return;
  utf8_printf(p->pLog, "(%d) %s\n", iErrCode, zMsg);
  fflush(p->pLog);
}

/*
** Output the given string as a hex-encoded blob (eg. X'1234' )
*/
static void output_hex_blob(FILE *out, const void *pBlob, int nBlob){
  int i;
  char *zBlob = (char *)pBlob;
  raw_printf(out,"X'");
  for(i=0; i<nBlob; i++){ raw_printf(out,"%02x",zBlob[i]&0xff); }
  raw_printf(out,"'");
}

/*
** Output the given string as a quoted string using SQL quoting conventions.
*/
static void output_quoted_string(FILE *out, const char *z){
  int i;
  int nSingle = 0;
  setBinaryMode(out);
  for(i=0; z[i]; i++){
    if( z[i]=='\'' ) nSingle++;
  }
  if( nSingle==0 ){
    utf8_printf(out,"'%s'",z);
  }else{
    raw_printf(out,"'");
    while( *z ){
      for(i=0; z[i] && z[i]!='\''; i++){}
      if( i==0 ){
        raw_printf(out,"''");
        z++;
      }else if( z[i]=='\'' ){
        utf8_printf(out,"%.*s''",i,z);
        z += i+1;
      }else{
        utf8_printf(out,"%s",z);
        break;
      }
    }
    raw_printf(out,"'");
  }
  setTextMode(out);
}

/*
** Output the given string as a quoted according to C or TCL quoting rules.
*/
    }else if( c=='\n' ){
      fputc('\\', out);
      fputc('n', out);
    }else if( c=='\r' ){
      fputc('\\', out);
      fputc('r', out);
    }else if( !isprint(c&0xff) ){
      fprintf(out, "\\%03o", c&0xff);
    }else{
      fputc(c, out);
    }
  }
  fputc('"', out);
}

    }else if( c=='\n' ){
      fputc('\\', out);
      fputc('n', out);
    }else if( c=='\r' ){
      fputc('\\', out);
      fputc('r', out);
    }else if( !isprint(c&0xff) ){
      raw_printf(out, "\\%03o", c&0xff);
    }else{
      fputc(c, out);
    }
  }
  fputc('"', out);
}

            && z[i]!='<' 
            && z[i]!='&' 
            && z[i]!='>' 
            && z[i]!='\"' 
            && z[i]!='\'';
        i++){}
    if( i>0 ){
      fprintf(out,"%.*s",i,z);
    }
    if( z[i]=='<' ){
      fprintf(out,"&lt;");
    }else if( z[i]=='&' ){
      fprintf(out,"&amp;");
    }else if( z[i]=='>' ){
      fprintf(out,"&gt;");
    }else if( z[i]=='\"' ){
      fprintf(out,"&quot;");
    }else if( z[i]=='\'' ){
      fprintf(out,"&#39;");
    }else{
      break;
    }
    z += i + 1;
  }
}

            && z[i]!='<' 
            && z[i]!='&' 
            && z[i]!='>' 
            && z[i]!='\"' 
            && z[i]!='\'';
        i++){}
    if( i>0 ){
      utf8_printf(out,"%.*s",i,z);
    }
    if( z[i]=='<' ){
      raw_printf(out,"&lt;");
    }else if( z[i]=='&' ){
      raw_printf(out,"&amp;");
    }else if( z[i]=='>' ){
      raw_printf(out,"&gt;");
    }else if( z[i]=='\"' ){
      raw_printf(out,"&quot;");
    }else if( z[i]=='\'' ){
      raw_printf(out,"&#39;");
    }else{
      break;
    }
    z += i + 1;
  }
}

** the separator, which may or may not be a comma.  p->nullValue is
** the null value.  Strings are quoted if necessary.  The separator
** is only issued if bSep is true.
*/
static void output_csv(ShellState *p, const char *z, int bSep){
  FILE *out = p->out;
  if( z==0 ){
    fprintf(out,"%s",p->nullValue);
  }else{
    int i;
    int nSep = strlen30(p->colSeparator);
    for(i=0; z[i]; i++){
      if( needCsvQuote[((unsigned char*)z)[i]] 
         || (z[i]==p->colSeparator[0] && 
             (nSep==1 || memcmp(z, p->colSeparator, nSep)==0)) ){
        i = 0;
        break;
      }
    }
    if( i==0 ){
      putc('"', out);
      for(i=0; z[i]; i++){
        if( z[i]=='"' ) putc('"', out);
        putc(z[i], out);
      }
      putc('"', out);
    }else{
      fprintf(out, "%s", z);
    }
  }
  if( bSep ){
    fprintf(p->out, "%s", p->colSeparator);
  }
}

#ifdef SIGINT
/*
** This routine runs when the user presses Ctrl-C
*/

** the separator, which may or may not be a comma.  p->nullValue is
** the null value.  Strings are quoted if necessary.  The separator
** is only issued if bSep is true.
*/
static void output_csv(ShellState *p, const char *z, int bSep){
  FILE *out = p->out;
  if( z==0 ){
    utf8_printf(out,"%s",p->nullValue);
  }else{
    int i;
    int nSep = strlen30(p->colSeparator);
    for(i=0; z[i]; i++){
      if( needCsvQuote[((unsigned char*)z)[i]] 
         || (z[i]==p->colSeparator[0] && 
             (nSep==1 || memcmp(z, p->colSeparator, nSep)==0)) ){
        i = 0;
        break;
      }
    }
    if( i==0 ){
      putc('"', out);
      for(i=0; z[i]; i++){
        if( z[i]=='"' ) putc('"', out);
        putc(z[i], out);
      }
      putc('"', out);
    }else{
      utf8_printf(out, "%s", z);
    }
  }
  if( bSep ){
    utf8_printf(p->out, "%s", p->colSeparator);
  }
}

#ifdef SIGINT
/*
** This routine runs when the user presses Ctrl-C
*/
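
output_csv() above quotes a field only when it contains a character flagged in needCsvQuote[] or when the configured column separator appears in the data, and it doubles any embedded double-quote. A simplified standalone sketch of that quoting rule, hard-wired to a comma separator for illustration:

#include <stdio.h>

/* Simplified CSV field writer: quote when the field holds a comma, a
** double-quote, or a control character, and double embedded quotes.
** output_csv() itself also honours the configurable separator and the
** needCsvQuote[] table. */
static void csv_field_sketch(FILE *out, const char *z){
  int i, needQuote = 0;
  for(i=0; z[i]; i++){
    unsigned char c = (unsigned char)z[i];
    if( c=='"' || c==',' || c<' ' ){ needQuote = 1; break; }
  }
  if( !needQuote ){
    fprintf(out, "%s", z);
  }else{
    putc('"', out);
    for(i=0; z[i]; i++){
      if( z[i]=='"' ) putc('"', out);
      putc(z[i], out);
    }
    putc('"', out);
  }
}

int main(void){
  csv_field_sketch(stdout, "plain");      putc(',', stdout);
  csv_field_sketch(stdout, "a,b");        putc(',', stdout);
  csv_field_sketch(stdout, "say \"hi\"");
  putc('\n', stdout);    /* prints: plain,"a,b","say ""hi""" */
  return 0;
}
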
  char **azArg,    /* Text of each result column */
  char **azCol,    /* Column names */
  int *aiType      /* Column types */
){
  int i;
  ShellState *p = (ShellState*)pArg;

  switch( p->mode ){
    case MODE_Line: {
      int w = 5;
      if( azArg==0 ) break;
      for(i=0; i<nArg; i++){
        int len = strlen30(azCol[i] ? azCol[i] : "");
        if( len>w ) w = len;
      }
      if( p->cnt++>0 ) fprintf(p->out, "%s", p->rowSeparator);
      for(i=0; i<nArg; i++){
        fprintf(p->out,"%*s = %s%s", w, azCol[i],
                azArg[i] ? azArg[i] : p->nullValue, p->rowSeparator);
      }
      break;
    }
    case MODE_Explain:
    case MODE_Column: {
      if( p->cnt++==0 ){
        for(i=0; i<nArg; i++){
          int w, n;
          if( i<ArraySize(p->colWidth) ){
            w = p->colWidth[i];
          }else{
            w = 0;
          }
          if( w==0 ){
            w = strlen30(azCol[i] ? azCol[i] : "");
            if( w<10 ) w = 10;
            n = strlen30(azArg && azArg[i] ? azArg[i] : p->nullValue);
            if( w<n ) w = n;
          }
          if( i<ArraySize(p->actualWidth) ){
            p->actualWidth[i] = w;
          }
          if( p->showHeader ){
            if( w<0 ){
              fprintf(p->out,"%*.*s%s",-w,-w,azCol[i],
                      i==nArg-1 ? p->rowSeparator : "  ");
            }else{
              fprintf(p->out,"%-*.*s%s",w,w,azCol[i],
                      i==nArg-1 ? p->rowSeparator : "  ");
            }
          }
        }
        if( p->showHeader ){
          for(i=0; i<nArg; i++){
            int w;
            if( i<ArraySize(p->actualWidth) ){
               w = p->actualWidth[i];
               if( w<0 ) w = -w;
            }else{
               w = 10;
            }
            fprintf(p->out,"%-*.*s%s",w,w,"-----------------------------------"

                   "----------------------------------------------------------",
                    i==nArg-1 ? p->rowSeparator : "  ");
          }
        }
      }
      if( azArg==0 ) break;
      for(i=0; i<nArg; i++){
        int w;
        if( i<ArraySize(p->actualWidth) ){
           w = p->actualWidth[i];
        }else{
           w = 10;
        }
        if( p->mode==MODE_Explain && azArg[i] && strlen30(azArg[i])>w ){
          w = strlen30(azArg[i]);
        }
        if( i==1 && p->aiIndent && p->pStmt ){
          if( p->iIndent<p->nIndent ){
            fprintf(p->out, "%*.s", p->aiIndent[p->iIndent], "");
          }
          p->iIndent++;
        }
        if( w<0 ){
          fprintf(p->out,"%*.*s%s",-w,-w,
              azArg[i] ? azArg[i] : p->nullValue,
              i==nArg-1 ? p->rowSeparator : "  ");
        }else{
          fprintf(p->out,"%-*.*s%s",w,w,
              azArg[i] ? azArg[i] : p->nullValue,
              i==nArg-1 ? p->rowSeparator : "  ");
        }
      }
      break;
    }
    case MODE_Semi:
    case MODE_List: {
      if( p->cnt++==0 && p->showHeader ){
        for(i=0; i<nArg; i++){
          fprintf(p->out,"%s%s",azCol[i],
                  i==nArg-1 ? p->rowSeparator : p->colSeparator);
        }
      }
      if( azArg==0 ) break;
      for(i=0; i<nArg; i++){
        char *z = azArg[i];
        if( z==0 ) z = p->nullValue;
        fprintf(p->out, "%s", z);
        if( i<nArg-1 ){
          fprintf(p->out, "%s", p->colSeparator);
        }else if( p->mode==MODE_Semi ){
          fprintf(p->out, ";%s", p->rowSeparator);
        }else{
          fprintf(p->out, "%s", p->rowSeparator);
        }
      }
      break;
    }
    case MODE_Html: {
      if( p->cnt++==0 && p->showHeader ){
        fprintf(p->out,"<TR>");
        for(i=0; i<nArg; i++){
          fprintf(p->out,"<TH>");
          output_html_string(p->out, azCol[i]);
          fprintf(p->out,"</TH>\n");
        }
        fprintf(p->out,"</TR>\n");
      }
      if( azArg==0 ) break;
      fprintf(p->out,"<TR>");
      for(i=0; i<nArg; i++){
        fprintf(p->out,"<TD>");
        output_html_string(p->out, azArg[i] ? azArg[i] : p->nullValue);
        fprintf(p->out,"</TD>\n");
      }
      fprintf(p->out,"</TR>\n");
      break;
    }
    case MODE_Tcl: {
      if( p->cnt++==0 && p->showHeader ){
        for(i=0; i<nArg; i++){
          output_c_string(p->out,azCol[i] ? azCol[i] : "");
          if(i<nArg-1) fprintf(p->out, "%s", p->colSeparator);
        }
        fprintf(p->out, "%s", p->rowSeparator);
      }
      if( azArg==0 ) break;
      for(i=0; i<nArg; i++){
        output_c_string(p->out, azArg[i] ? azArg[i] : p->nullValue);
        if(i<nArg-1) fprintf(p->out, "%s", p->colSeparator);
      }
      fprintf(p->out, "%s", p->rowSeparator);
      break;
    }
    case MODE_Csv: {
      setBinaryMode(p->out);
      if( p->cnt++==0 && p->showHeader ){
        for(i=0; i<nArg; i++){
          output_csv(p, azCol[i] ? azCol[i] : "", i<nArg-1);
        }
        fprintf(p->out, "%s", p->rowSeparator);
      }
      if( nArg>0 ){
        for(i=0; i<nArg; i++){
          output_csv(p, azArg[i], i<nArg-1);
        }
        fprintf(p->out, "%s", p->rowSeparator);
      }
      setTextMode(p->out);
      break;
    }
    case MODE_Insert: {
      p->cnt++;
      if( azArg==0 ) break;
      fprintf(p->out,"INSERT INTO %s",p->zDestTable);
      if( p->showHeader ){
        fprintf(p->out,"(");
        for(i=0; i<nArg; i++){
          char *zSep = i>0 ? ",": "";
          fprintf(p->out, "%s%s", zSep, azCol[i]);
        }
        fprintf(p->out,")");
      }
      fprintf(p->out," VALUES(");
      for(i=0; i<nArg; i++){
        char *zSep = i>0 ? ",": "";
        if( (azArg[i]==0) || (aiType && aiType[i]==SQLITE_NULL) ){
          fprintf(p->out,"%sNULL",zSep);
        }else if( aiType && aiType[i]==SQLITE_TEXT ){
          if( zSep[0] ) fprintf(p->out,"%s",zSep);
          output_quoted_string(p->out, azArg[i]);
        }else if( aiType && (aiType[i]==SQLITE_INTEGER
                             || aiType[i]==SQLITE_FLOAT) ){
          fprintf(p->out,"%s%s",zSep, azArg[i]);
        }else if( aiType && aiType[i]==SQLITE_BLOB && p->pStmt ){
          const void *pBlob = sqlite3_column_blob(p->pStmt, i);
          int nBlob = sqlite3_column_bytes(p->pStmt, i);
          if( zSep[0] ) fprintf(p->out,"%s",zSep);
          output_hex_blob(p->out, pBlob, nBlob);
        }else if( isNumber(azArg[i], 0) ){
          fprintf(p->out,"%s%s",zSep, azArg[i]);
        }else{
          if( zSep[0] ) fprintf(p->out,"%s",zSep);
          output_quoted_string(p->out, azArg[i]);
        }
      }
      fprintf(p->out,");\n");
      break;
    }
    case MODE_Ascii: {
      if( p->cnt++==0 && p->showHeader ){
        for(i=0; i<nArg; i++){
          if( i>0 ) fprintf(p->out, "%s", p->colSeparator);
          fprintf(p->out,"%s",azCol[i] ? azCol[i] : "");
        }
        fprintf(p->out, "%s", p->rowSeparator);
      }
      if( azArg==0 ) break;
      for(i=0; i<nArg; i++){
        if( i>0 ) fprintf(p->out, "%s", p->colSeparator);
        fprintf(p->out,"%s",azArg[i] ? azArg[i] : p->nullValue);
      }
      fprintf(p->out, "%s", p->rowSeparator);
      break;
    }
  }
  return 0;
}

/*
  char **azArg,    /* Text of each result column */
  char **azCol,    /* Column names */
  int *aiType      /* Column types */
){
  int i;
  ShellState *p = (ShellState*)pArg;

  switch( p->cMode ){
    case MODE_Line: {
      int w = 5;
      if( azArg==0 ) break;
      for(i=0; i<nArg; i++){
        int len = strlen30(azCol[i] ? azCol[i] : "");
        if( len>w ) w = len;
      }
      if( p->cnt++>0 ) utf8_printf(p->out, "%s", p->rowSeparator);
      for(i=0; i<nArg; i++){
        utf8_printf(p->out,"%*s = %s%s", w, azCol[i],
                azArg[i] ? azArg[i] : p->nullValue, p->rowSeparator);
      }
      break;
    }
    case MODE_Explain:
    case MODE_Column: {
      static const int aExplainWidths[] = {4, 13, 4, 4, 4, 13, 2, 13};
      const int *colWidth;
      int showHdr;
      char *rowSep;
      if( p->cMode==MODE_Column ){
        colWidth = p->colWidth;
        showHdr = p->showHeader;
        rowSep = p->rowSeparator;
      }else{
        colWidth = aExplainWidths;
        showHdr = 1;
        rowSep = SEP_Row;
      }
      if( p->cnt++==0 ){
        for(i=0; i<nArg; i++){
          int w, n;
          if( i<ArraySize(p->colWidth) ){
            w = colWidth[i];
          }else{
            w = 0;
          }
          if( w==0 ){
            w = strlen30(azCol[i] ? azCol[i] : "");
            if( w<10 ) w = 10;
            n = strlen30(azArg && azArg[i] ? azArg[i] : p->nullValue);
            if( w<n ) w = n;
          }
          if( i<ArraySize(p->actualWidth) ){
            p->actualWidth[i] = w;
          }
          if( showHdr ){
            if( w<0 ){
              utf8_printf(p->out,"%*.*s%s",-w,-w,azCol[i],
                      i==nArg-1 ? rowSep : "  ");
            }else{
              utf8_printf(p->out,"%-*.*s%s",w,w,azCol[i],
                      i==nArg-1 ? rowSep : "  ");
            }
          }
        }
        if( showHdr ){
          for(i=0; i<nArg; i++){
            int w;
            if( i<ArraySize(p->actualWidth) ){
               w = p->actualWidth[i];
               if( w<0 ) w = -w;
            }else{
               w = 10;
            }
            utf8_printf(p->out,"%-*.*s%s",w,w,
                   "----------------------------------------------------------"
                   "----------------------------------------------------------",
                    i==nArg-1 ? rowSep : "  ");
          }
        }
      }
      if( azArg==0 ) break;
      for(i=0; i<nArg; i++){
        int w;
        if( i<ArraySize(p->actualWidth) ){
           w = p->actualWidth[i];
        }else{
           w = 10;
        }
        if( p->cMode==MODE_Explain && azArg[i] && strlen30(azArg[i])>w ){
          w = strlen30(azArg[i]);
        }
        if( i==1 && p->aiIndent && p->pStmt ){
          if( p->iIndent<p->nIndent ){
            utf8_printf(p->out, "%*.s", p->aiIndent[p->iIndent], "");
          }
          p->iIndent++;
        }
        if( w<0 ){
          utf8_printf(p->out,"%*.*s%s",-w,-w,
              azArg[i] ? azArg[i] : p->nullValue,
              i==nArg-1 ? rowSep : "  ");
        }else{
          utf8_printf(p->out,"%-*.*s%s",w,w,
              azArg[i] ? azArg[i] : p->nullValue,
              i==nArg-1 ? rowSep : "  ");
        }
      }
      break;
    }
    case MODE_Semi:
    case MODE_List: {
      if( p->cnt++==0 && p->showHeader ){
        for(i=0; i<nArg; i++){
          utf8_printf(p->out,"%s%s",azCol[i],
                  i==nArg-1 ? p->rowSeparator : p->colSeparator);
        }
      }
      if( azArg==0 ) break;
      for(i=0; i<nArg; i++){
        char *z = azArg[i];
        if( z==0 ) z = p->nullValue;
        utf8_printf(p->out, "%s", z);
        if( i<nArg-1 ){
          utf8_printf(p->out, "%s", p->colSeparator);
        }else if( p->cMode==MODE_Semi ){
          utf8_printf(p->out, ";%s", p->rowSeparator);
        }else{
          utf8_printf(p->out, "%s", p->rowSeparator);
        }
      }
      break;
    }
    case MODE_Html: {
      if( p->cnt++==0 && p->showHeader ){
        raw_printf(p->out,"<TR>");
        for(i=0; i<nArg; i++){
          raw_printf(p->out,"<TH>");
          output_html_string(p->out, azCol[i]);
          raw_printf(p->out,"</TH>\n");
        }
        raw_printf(p->out,"</TR>\n");
      }
      if( azArg==0 ) break;
      raw_printf(p->out,"<TR>");
      for(i=0; i<nArg; i++){
        raw_printf(p->out,"<TD>");
        output_html_string(p->out, azArg[i] ? azArg[i] : p->nullValue);
        raw_printf(p->out,"</TD>\n");
      }
      raw_printf(p->out,"</TR>\n");
      break;
    }
    case MODE_Tcl: {
      if( p->cnt++==0 && p->showHeader ){
        for(i=0; i<nArg; i++){
          output_c_string(p->out,azCol[i] ? azCol[i] : "");
          if(i<nArg-1) utf8_printf(p->out, "%s", p->colSeparator);
        }
        utf8_printf(p->out, "%s", p->rowSeparator);
      }
      if( azArg==0 ) break;
      for(i=0; i<nArg; i++){
        output_c_string(p->out, azArg[i] ? azArg[i] : p->nullValue);
        if(i<nArg-1) utf8_printf(p->out, "%s", p->colSeparator);
      }
      utf8_printf(p->out, "%s", p->rowSeparator);
      break;
    }
    case MODE_Csv: {
      setBinaryMode(p->out);
      if( p->cnt++==0 && p->showHeader ){
        for(i=0; i<nArg; i++){
          output_csv(p, azCol[i] ? azCol[i] : "", i<nArg-1);
        }
        utf8_printf(p->out, "%s", p->rowSeparator);
      }
      if( nArg>0 ){
        for(i=0; i<nArg; i++){
          output_csv(p, azArg[i], i<nArg-1);
        }
        utf8_printf(p->out, "%s", p->rowSeparator);
      }
      setTextMode(p->out);
      break;
    }
    case MODE_Insert: {
      p->cnt++;
      if( azArg==0 ) break;
      utf8_printf(p->out,"INSERT INTO %s",p->zDestTable);
      if( p->showHeader ){
        raw_printf(p->out,"(");
        for(i=0; i<nArg; i++){
          char *zSep = i>0 ? ",": "";
          utf8_printf(p->out, "%s%s", zSep, azCol[i]);
        }
        raw_printf(p->out,")");
      }
      raw_printf(p->out," VALUES(");
      for(i=0; i<nArg; i++){
        char *zSep = i>0 ? ",": "";
        if( (azArg[i]==0) || (aiType && aiType[i]==SQLITE_NULL) ){
          utf8_printf(p->out,"%sNULL",zSep);
        }else if( aiType && aiType[i]==SQLITE_TEXT ){
          if( zSep[0] ) utf8_printf(p->out,"%s",zSep);
          output_quoted_string(p->out, azArg[i]);
        }else if( aiType && (aiType[i]==SQLITE_INTEGER
                             || aiType[i]==SQLITE_FLOAT) ){
          utf8_printf(p->out,"%s%s",zSep, azArg[i]);
        }else if( aiType && aiType[i]==SQLITE_BLOB && p->pStmt ){
          const void *pBlob = sqlite3_column_blob(p->pStmt, i);
          int nBlob = sqlite3_column_bytes(p->pStmt, i);
          if( zSep[0] ) utf8_printf(p->out,"%s",zSep);
          output_hex_blob(p->out, pBlob, nBlob);
        }else if( isNumber(azArg[i], 0) ){
          utf8_printf(p->out,"%s%s",zSep, azArg[i]);
        }else{
          if( zSep[0] ) utf8_printf(p->out,"%s",zSep);
          output_quoted_string(p->out, azArg[i]);
        }
      }
      raw_printf(p->out,");\n");
      break;
    }
    case MODE_Ascii: {
      if( p->cnt++==0 && p->showHeader ){
        for(i=0; i<nArg; i++){
          if( i>0 ) utf8_printf(p->out, "%s", p->colSeparator);
          utf8_printf(p->out,"%s",azCol[i] ? azCol[i] : "");
        }
        utf8_printf(p->out, "%s", p->rowSeparator);
      }
      if( azArg==0 ) break;
      for(i=0; i<nArg; i++){
        if( i>0 ) utf8_printf(p->out, "%s", p->colSeparator);
        utf8_printf(p->out,"%s",azArg[i] ? azArg[i] : p->nullValue);
      }
      utf8_printf(p->out, "%s", p->rowSeparator);
      break;
    }
  }
  return 0;
}

/*
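
The callback now switches on p->cMode instead of p->mode. cMode is described in the ShellState hunk as the temporary output mode for the current query, so a per-query override (for example the fixed aExplainWidths column layout used when explain output is detected) no longer clobbers the mode the user chose with ".mode". A minimal sketch of that pattern with made-up names, not the shell's actual control flow:

#include <stdio.h>

/* mode/cMode split: the persistent setting is copied into a per-query
** working mode, which a single query may then override. */
typedef struct ModeDemo ModeDemo;
struct ModeDemo {
  int mode;     /* persistent setting, e.g. chosen with ".mode" */
  int cMode;    /* working mode for the query being run         */
};

enum { DEMO_MODE_COLUMN = 1, DEMO_MODE_EXPLAIN = 2 };

static void begin_query(ModeDemo *p, int isExplainQuery, int autoExplainOn){
  p->cMode = p->mode;                 /* start from the user's setting */
  if( isExplainQuery && autoExplainOn ){
    p->cMode = DEMO_MODE_EXPLAIN;     /* per-query override only       */
  }
}

int main(void){
  ModeDemo s = { DEMO_MODE_COLUMN, DEMO_MODE_COLUMN };
  begin_query(&s, 1, 1);
  printf("mode=%d cMode=%d\n", s.mode, s.cMode);   /* mode=1 cMode=2 */
  return 0;
}
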
      needQuote = 1;
      if( zName[i]=='\'' ) n++;
    }
  }
  if( needQuote ) n += 2;
  z = p->zDestTable = malloc( n+1 );
  if( z==0 ){
    fprintf(stderr,"Error: out of memory\n");
    exit(1);
  }
  n = 0;
  if( needQuote ) z[n++] = '\'';
  for(i=0; zName[i]; i++){
    z[n++] = zName[i];
    if( zName[i]=='\'' ) z[n++] = '\'';


      needQuote = 1;
      if( zName[i]=='\'' ) n++;
    }
  }
  if( needQuote ) n += 2;
  z = p->zDestTable = malloc( n+1 );
  if( z==0 ){
    raw_printf(stderr,"Error: out of memory\n");
    exit(1);
  }
  n = 0;
  if( needQuote ) z[n++] = '\'';
  for(i=0; zName[i]; i++){
    z[n++] = zName[i];
    if( zName[i]=='\'' ) z[n++] = '\'';
  sqlite3_stmt *pSelect;
  int rc;
  int nResult;
  int i;
  const char *z;
  rc = sqlite3_prepare_v2(p->db, zSelect, -1, &pSelect, 0);
  if( rc!=SQLITE_OK || !pSelect ){
    fprintf(p->out, "/**** ERROR: (%d) %s *****/\n", rc, sqlite3_errmsg(p->db));

    if( (rc&0xff)!=SQLITE_CORRUPT ) p->nErr++;
    return rc;
  }
  rc = sqlite3_step(pSelect);
  nResult = sqlite3_column_count(pSelect);
  while( rc==SQLITE_ROW ){
    if( zFirstRow ){
      fprintf(p->out, "%s", zFirstRow);
      zFirstRow = 0;
    }
    z = (const char*)sqlite3_column_text(pSelect, 0);
    fprintf(p->out, "%s", z);
    for(i=1; i<nResult; i++){ 
      fprintf(p->out, ",%s", sqlite3_column_text(pSelect, i));
    }
    if( z==0 ) z = "";
    while( z[0] && (z[0]!='-' || z[1]!='-') ) z++;
    if( z[0] ){
      fprintf(p->out, "\n;\n");
    }else{
      fprintf(p->out, ";\n");
    }    
    rc = sqlite3_step(pSelect);
  }
  rc = sqlite3_finalize(pSelect);
  if( rc!=SQLITE_OK ){
    fprintf(p->out, "/**** ERROR: (%d) %s *****/\n", rc, sqlite3_errmsg(p->db));

    if( (rc&0xff)!=SQLITE_CORRUPT ) p->nErr++;
  }
  return rc;
}

/*
** Allocate space and save off current error string.

  sqlite3_stmt *pSelect;
  int rc;
  int nResult;
  int i;
  const char *z;
  rc = sqlite3_prepare_v2(p->db, zSelect, -1, &pSelect, 0);
  if( rc!=SQLITE_OK || !pSelect ){
    utf8_printf(p->out, "/**** ERROR: (%d) %s *****/\n", rc,
                sqlite3_errmsg(p->db));
    if( (rc&0xff)!=SQLITE_CORRUPT ) p->nErr++;
    return rc;
  }
  rc = sqlite3_step(pSelect);
  nResult = sqlite3_column_count(pSelect);
  while( rc==SQLITE_ROW ){
    if( zFirstRow ){
      utf8_printf(p->out, "%s", zFirstRow);
      zFirstRow = 0;
    }
    z = (const char*)sqlite3_column_text(pSelect, 0);
    utf8_printf(p->out, "%s", z);
    for(i=1; i<nResult; i++){ 
      utf8_printf(p->out, ",%s", sqlite3_column_text(pSelect, i));
    }
    if( z==0 ) z = "";
    while( z[0] && (z[0]!='-' || z[1]!='-') ) z++;
    if( z[0] ){
      raw_printf(p->out, "\n;\n");
    }else{
      raw_printf(p->out, ";\n");
    }    
    rc = sqlite3_step(pSelect);
  }
  rc = sqlite3_finalize(pSelect);
  if( rc!=SQLITE_OK ){
    utf8_printf(p->out, "/**** ERROR: (%d) %s *****/\n", rc,
                sqlite3_errmsg(p->db));
    if( (rc&0xff)!=SQLITE_CORRUPT ) p->nErr++;
  }
  return rc;
}

/*
** Allocate space and save off current error string.
  int iCur;
  int iHiwtr;

  if( pArg && pArg->out ){
    
    iHiwtr = iCur = -1;
    sqlite3_status(SQLITE_STATUS_MEMORY_USED, &iCur, &iHiwtr, bReset);
    fprintf(pArg->out,
            "Memory Used:                         %d (max %d) bytes\n",
            iCur, iHiwtr);
    iHiwtr = iCur = -1;
    sqlite3_status(SQLITE_STATUS_MALLOC_COUNT, &iCur, &iHiwtr, bReset);
    fprintf(pArg->out, "Number of Outstanding Allocations:   %d (max %d)\n",
            iCur, iHiwtr);
    if( pArg->shellFlgs & SHFLG_Pagecache ){
      iHiwtr = iCur = -1;
      sqlite3_status(SQLITE_STATUS_PAGECACHE_USED, &iCur, &iHiwtr, bReset);
      fprintf(pArg->out,
              "Number of Pcache Pages Used:         %d (max %d) pages\n",
              iCur, iHiwtr);
    }
    iHiwtr = iCur = -1;
    sqlite3_status(SQLITE_STATUS_PAGECACHE_OVERFLOW, &iCur, &iHiwtr, bReset);
    fprintf(pArg->out,
            "Number of Pcache Overflow Bytes:     %d (max %d) bytes\n",
            iCur, iHiwtr);
    if( pArg->shellFlgs & SHFLG_Scratch ){
      iHiwtr = iCur = -1;
      sqlite3_status(SQLITE_STATUS_SCRATCH_USED, &iCur, &iHiwtr, bReset);

      fprintf(pArg->out, "Number of Scratch Allocations Used:  %d (max %d)\n",
              iCur, iHiwtr);
    }
    iHiwtr = iCur = -1;
    sqlite3_status(SQLITE_STATUS_SCRATCH_OVERFLOW, &iCur, &iHiwtr, bReset);
    fprintf(pArg->out,
            "Number of Scratch Overflow Bytes:    %d (max %d) bytes\n",
            iCur, iHiwtr);
    iHiwtr = iCur = -1;
    sqlite3_status(SQLITE_STATUS_MALLOC_SIZE, &iCur, &iHiwtr, bReset);
    fprintf(pArg->out, "Largest Allocation:                  %d bytes\n",
            iHiwtr);
    iHiwtr = iCur = -1;
    sqlite3_status(SQLITE_STATUS_PAGECACHE_SIZE, &iCur, &iHiwtr, bReset);
    fprintf(pArg->out, "Largest Pcache Allocation:           %d bytes\n",
            iHiwtr);
    iHiwtr = iCur = -1;
    sqlite3_status(SQLITE_STATUS_SCRATCH_SIZE, &iCur, &iHiwtr, bReset);
    fprintf(pArg->out, "Largest Scratch Allocation:          %d bytes\n",
            iHiwtr);
#ifdef YYTRACKMAXSTACKDEPTH
    iHiwtr = iCur = -1;
    sqlite3_status(SQLITE_STATUS_PARSER_STACK, &iCur, &iHiwtr, bReset);
    fprintf(pArg->out, "Deepest Parser Stack:                %d (max %d)\n",
            iCur, iHiwtr);
#endif
  }

  if( pArg && pArg->out && db ){
    if( pArg->shellFlgs & SHFLG_Lookaside ){
      iHiwtr = iCur = -1;
      sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_USED,
                        &iCur, &iHiwtr, bReset);

      fprintf(pArg->out, "Lookaside Slots Used:                %d (max %d)\n",
              iCur, iHiwtr);
      sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_HIT,
                        &iCur, &iHiwtr, bReset);
      fprintf(pArg->out, "Successful lookaside attempts:       %d\n", iHiwtr);

      sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE,
                        &iCur, &iHiwtr, bReset);
      fprintf(pArg->out, "Lookaside failures due to size:      %d\n", iHiwtr);

      sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL,
                        &iCur, &iHiwtr, bReset);
      fprintf(pArg->out, "Lookaside failures due to OOM:       %d\n", iHiwtr);

    }
    iHiwtr = iCur = -1;
    sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_USED, &iCur, &iHiwtr, bReset);
    fprintf(pArg->out, "Pager Heap Usage:                    %d bytes\n",iCur);

    iHiwtr = iCur = -1;
    sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_HIT, &iCur, &iHiwtr, 1);
    fprintf(pArg->out, "Page cache hits:                     %d\n", iCur);
    iHiwtr = iCur = -1;
    sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_MISS, &iCur, &iHiwtr, 1);
    fprintf(pArg->out, "Page cache misses:                   %d\n", iCur); 
    iHiwtr = iCur = -1;
    sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_WRITE, &iCur, &iHiwtr, 1);
    fprintf(pArg->out, "Page cache writes:                   %d\n", iCur); 
    iHiwtr = iCur = -1;
    sqlite3_db_status(db, SQLITE_DBSTATUS_SCHEMA_USED, &iCur, &iHiwtr, bReset);
    fprintf(pArg->out, "Schema Heap Usage:                   %d bytes\n",iCur); 

    iHiwtr = iCur = -1;
    sqlite3_db_status(db, SQLITE_DBSTATUS_STMT_USED, &iCur, &iHiwtr, bReset);
    fprintf(pArg->out, "Statement Heap/Lookaside Usage:      %d bytes\n",iCur); 

  }

  if( pArg && pArg->out && db && pArg->pStmt ){
    iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP,
                               bReset);
    fprintf(pArg->out, "Fullscan Steps:                      %d\n", iCur);
    iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_SORT, bReset);
    fprintf(pArg->out, "Sort Operations:                     %d\n", iCur);
    iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_AUTOINDEX,bReset);
    fprintf(pArg->out, "Autoindex Inserts:                   %d\n", iCur);
    iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_VM_STEP, bReset);
    fprintf(pArg->out, "Virtual Machine Steps:               %d\n", iCur);
  }



  return 0;
}

/*
** Display scan stats.
*/
static void display_scanstats(
  sqlite3 *db,                    /* Database to query */
  ShellState *pArg                /* Pointer to ShellState */
){
#ifndef SQLITE_ENABLE_STMT_SCANSTATUS
  UNUSED_PARAMETER(db);
  UNUSED_PARAMETER(pArg);
#else
  int i, k, n, mx;
  fprintf(pArg->out, "-------- scanstats --------\n");
  mx = 0;
  for(k=0; k<=mx; k++){
    double rEstLoop = 1.0;
    for(i=n=0; 1; i++){
      sqlite3_stmt *p = pArg->pStmt;
      sqlite3_int64 nLoop, nVisit;
      double rEst;
      int iSid;
      const char *zExplain;
      if( sqlite3_stmt_scanstatus(p, i, SQLITE_SCANSTAT_NLOOP, (void*)&nLoop) ){
        break;
      }
      sqlite3_stmt_scanstatus(p, i, SQLITE_SCANSTAT_SELECTID, (void*)&iSid);
      if( iSid>mx ) mx = iSid;
      if( iSid!=k ) continue;
      if( n==0 ){
        rEstLoop = (double)nLoop;
        if( k>0 ) fprintf(pArg->out, "-------- subquery %d -------\n", k);
      }
      n++;
      sqlite3_stmt_scanstatus(p, i, SQLITE_SCANSTAT_NVISIT, (void*)&nVisit);
      sqlite3_stmt_scanstatus(p, i, SQLITE_SCANSTAT_EST, (void*)&rEst);
      sqlite3_stmt_scanstatus(p, i, SQLITE_SCANSTAT_EXPLAIN, (void*)&zExplain);
      fprintf(pArg->out, "Loop %2d: %s\n", n, zExplain);
      rEstLoop *= rEst;
      fprintf(pArg->out, 
          "         nLoop=%-8lld nRow=%-8lld estRow=%-8lld estRow/Loop=%-8g\n",
          nLoop, nVisit, (sqlite3_int64)(rEstLoop+0.5), rEst
      );
    }
  }
  fprintf(pArg->out, "---------------------------\n");
#endif
}

/*
** Parameter azArray points to a zero-terminated array of strings. zStr
** points to a single nul-terminated string. Return non-zero if zStr
** is equal, according to strcmp(), to any of the strings in the array.






New version, lines 1295-1465:
  int iCur;
  int iHiwtr;

  if( pArg && pArg->out ){
    
    iHiwtr = iCur = -1;
    sqlite3_status(SQLITE_STATUS_MEMORY_USED, &iCur, &iHiwtr, bReset);
    raw_printf(pArg->out,
            "Memory Used:                         %d (max %d) bytes\n",
            iCur, iHiwtr);
    iHiwtr = iCur = -1;
    sqlite3_status(SQLITE_STATUS_MALLOC_COUNT, &iCur, &iHiwtr, bReset);
    raw_printf(pArg->out, "Number of Outstanding Allocations:   %d (max %d)\n",
            iCur, iHiwtr);
    if( pArg->shellFlgs & SHFLG_Pagecache ){
      iHiwtr = iCur = -1;
      sqlite3_status(SQLITE_STATUS_PAGECACHE_USED, &iCur, &iHiwtr, bReset);
      raw_printf(pArg->out,
              "Number of Pcache Pages Used:         %d (max %d) pages\n",
              iCur, iHiwtr);
    }
    iHiwtr = iCur = -1;
    sqlite3_status(SQLITE_STATUS_PAGECACHE_OVERFLOW, &iCur, &iHiwtr, bReset);
    raw_printf(pArg->out,
            "Number of Pcache Overflow Bytes:     %d (max %d) bytes\n",
            iCur, iHiwtr);
    if( pArg->shellFlgs & SHFLG_Scratch ){
      iHiwtr = iCur = -1;
      sqlite3_status(SQLITE_STATUS_SCRATCH_USED, &iCur, &iHiwtr, bReset);
      raw_printf(pArg->out,
              "Number of Scratch Allocations Used:  %d (max %d)\n",
              iCur, iHiwtr);
    }
    iHiwtr = iCur = -1;
    sqlite3_status(SQLITE_STATUS_SCRATCH_OVERFLOW, &iCur, &iHiwtr, bReset);
    raw_printf(pArg->out,
            "Number of Scratch Overflow Bytes:    %d (max %d) bytes\n",
            iCur, iHiwtr);
    iHiwtr = iCur = -1;
    sqlite3_status(SQLITE_STATUS_MALLOC_SIZE, &iCur, &iHiwtr, bReset);
    raw_printf(pArg->out, "Largest Allocation:                  %d bytes\n",
            iHiwtr);
    iHiwtr = iCur = -1;
    sqlite3_status(SQLITE_STATUS_PAGECACHE_SIZE, &iCur, &iHiwtr, bReset);
    raw_printf(pArg->out, "Largest Pcache Allocation:           %d bytes\n",
            iHiwtr);
    iHiwtr = iCur = -1;
    sqlite3_status(SQLITE_STATUS_SCRATCH_SIZE, &iCur, &iHiwtr, bReset);
    raw_printf(pArg->out, "Largest Scratch Allocation:          %d bytes\n",
            iHiwtr);
#ifdef YYTRACKMAXSTACKDEPTH
    iHiwtr = iCur = -1;
    sqlite3_status(SQLITE_STATUS_PARSER_STACK, &iCur, &iHiwtr, bReset);
    raw_printf(pArg->out, "Deepest Parser Stack:                %d (max %d)\n",
            iCur, iHiwtr);
#endif
  }

  if( pArg && pArg->out && db ){
    if( pArg->shellFlgs & SHFLG_Lookaside ){
      iHiwtr = iCur = -1;
      sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_USED,
                        &iCur, &iHiwtr, bReset);
      raw_printf(pArg->out,
              "Lookaside Slots Used:                %d (max %d)\n",
              iCur, iHiwtr);
      sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_HIT,
                        &iCur, &iHiwtr, bReset);
      raw_printf(pArg->out, "Successful lookaside attempts:       %d\n",
              iHiwtr);
      sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE,
                        &iCur, &iHiwtr, bReset);
      raw_printf(pArg->out, "Lookaside failures due to size:      %d\n",
              iHiwtr);
      sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL,
                        &iCur, &iHiwtr, bReset);
      raw_printf(pArg->out, "Lookaside failures due to OOM:       %d\n",
              iHiwtr);
    }
    iHiwtr = iCur = -1;
    sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_USED, &iCur, &iHiwtr, bReset);
    raw_printf(pArg->out, "Pager Heap Usage:                    %d bytes\n",
            iCur);
    iHiwtr = iCur = -1;
    sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_HIT, &iCur, &iHiwtr, 1);
    raw_printf(pArg->out, "Page cache hits:                     %d\n", iCur);
    iHiwtr = iCur = -1;
    sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_MISS, &iCur, &iHiwtr, 1);
    raw_printf(pArg->out, "Page cache misses:                   %d\n", iCur); 
    iHiwtr = iCur = -1;
    sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_WRITE, &iCur, &iHiwtr, 1);
    raw_printf(pArg->out, "Page cache writes:                   %d\n", iCur); 
    iHiwtr = iCur = -1;
    sqlite3_db_status(db, SQLITE_DBSTATUS_SCHEMA_USED, &iCur, &iHiwtr, bReset);
    raw_printf(pArg->out, "Schema Heap Usage:                   %d bytes\n",
            iCur); 
    iHiwtr = iCur = -1;
    sqlite3_db_status(db, SQLITE_DBSTATUS_STMT_USED, &iCur, &iHiwtr, bReset);
    raw_printf(pArg->out, "Statement Heap/Lookaside Usage:      %d bytes\n",
            iCur); 
  }

  if( pArg && pArg->out && db && pArg->pStmt ){
    iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP,
                               bReset);
    raw_printf(pArg->out, "Fullscan Steps:                      %d\n", iCur);
    iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_SORT, bReset);
    raw_printf(pArg->out, "Sort Operations:                     %d\n", iCur);
    iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_AUTOINDEX,bReset);
    raw_printf(pArg->out, "Autoindex Inserts:                   %d\n", iCur);
    iCur = sqlite3_stmt_status(pArg->pStmt, SQLITE_STMTSTATUS_VM_STEP, bReset);
    raw_printf(pArg->out, "Virtual Machine Steps:               %d\n", iCur);
  }

  /* Do not remove this machine readable comment: extra-stats-output-here */

  return 0;
}

/*
** Display scan stats.
*/
static void display_scanstats(
  sqlite3 *db,                    /* Database to query */
  ShellState *pArg                /* Pointer to ShellState */
){
#ifndef SQLITE_ENABLE_STMT_SCANSTATUS
  UNUSED_PARAMETER(db);
  UNUSED_PARAMETER(pArg);
#else
  int i, k, n, mx;
  raw_printf(pArg->out, "-------- scanstats --------\n");
  mx = 0;
  for(k=0; k<=mx; k++){
    double rEstLoop = 1.0;
    for(i=n=0; 1; i++){
      sqlite3_stmt *p = pArg->pStmt;
      sqlite3_int64 nLoop, nVisit;
      double rEst;
      int iSid;
      const char *zExplain;
      if( sqlite3_stmt_scanstatus(p, i, SQLITE_SCANSTAT_NLOOP, (void*)&nLoop) ){
        break;
      }
      sqlite3_stmt_scanstatus(p, i, SQLITE_SCANSTAT_SELECTID, (void*)&iSid);
      if( iSid>mx ) mx = iSid;
      if( iSid!=k ) continue;
      if( n==0 ){
        rEstLoop = (double)nLoop;
        if( k>0 ) raw_printf(pArg->out, "-------- subquery %d -------\n", k);
      }
      n++;
      sqlite3_stmt_scanstatus(p, i, SQLITE_SCANSTAT_NVISIT, (void*)&nVisit);
      sqlite3_stmt_scanstatus(p, i, SQLITE_SCANSTAT_EST, (void*)&rEst);
      sqlite3_stmt_scanstatus(p, i, SQLITE_SCANSTAT_EXPLAIN, (void*)&zExplain);
      utf8_printf(pArg->out, "Loop %2d: %s\n", n, zExplain);
      rEstLoop *= rEst;
      raw_printf(pArg->out, 
          "         nLoop=%-8lld nRow=%-8lld estRow=%-8lld estRow/Loop=%-8g\n",
          nLoop, nVisit, (sqlite3_int64)(rEstLoop+0.5), rEst
      );
    }
  }
  raw_printf(pArg->out, "---------------------------\n");
#endif
}

/*
** Parameter azArray points to a zero-terminated array of strings. zStr
** points to a single nul-terminated string. Return non-zero if zStr
** is equal, according to strcmp(), to any of the strings in the array.
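The comment above describes str_in_array(), whose body falls outside this hunk; a direct implementation of that description would be:

    #include <string.h>

    /* Return non-zero if zStr matches any entry of the NULL-terminated
    ** array azArray, comparing with strcmp(). */
    static int str_in_array(const char *zStr, const char **azArray){
      int i;
      for(i=0; azArray[i]; i++){
        if( strcmp(zStr, azArray[i])==0 ) return 1;
      }
      return 0;
    }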
Old version, lines 1413-1446:
                           "NextIfOpen", "PrevIfOpen", 0 };
  const char *azYield[] = { "Yield", "SeekLT", "SeekGT", "RowSetRead",
                            "Rewind", 0 };
  const char *azGoto[] = { "Goto", 0 };

  /* Try to figure out if this is really an EXPLAIN statement. If this
  ** cannot be verified, return early.  */




  zSql = sqlite3_sql(pSql);
  if( zSql==0 ) return;
  for(z=zSql; *z==' ' || *z=='\t' || *z=='\n' || *z=='\f' || *z=='\r'; z++);
  if( sqlite3_strnicmp(z, "explain", 7) ) return;




  for(iOp=0; SQLITE_ROW==sqlite3_step(pSql); iOp++){
    int i;
    int iAddr = sqlite3_column_int(pSql, 0);
    const char *zOp = (const char*)sqlite3_column_text(pSql, 1);

    /* Set p2 to the P2 field of the current opcode. Then, assuming that
    ** p2 is an instruction address, set variable p2op to the index of that
    ** instruction in the aiIndent[] array. p2 and p2op may be different if
    ** the current instruction is part of a sub-program generated by an
    ** SQL trigger or foreign key.  */
    int p2 = sqlite3_column_int(pSql, 3);
    int p2op = (p2 + (iOp-iAddr));

    /* Grow the p->aiIndent array as required */
    if( iOp>=nAlloc ){
      nAlloc += 100;
      p->aiIndent = (int*)sqlite3_realloc64(p->aiIndent, nAlloc*sizeof(int));
      abYield = (int*)sqlite3_realloc64(abYield, nAlloc*sizeof(int));
    }
    abYield[iOp] = str_in_array(zOp, azYield);
    p->aiIndent[iOp] = 0;
    p->nIndent = iOp+1;






New version, lines 1502-1556:
                           "NextIfOpen", "PrevIfOpen", 0 };
  const char *azYield[] = { "Yield", "SeekLT", "SeekGT", "RowSetRead",
                            "Rewind", 0 };
  const char *azGoto[] = { "Goto", 0 };

  /* Try to figure out if this is really an EXPLAIN statement. If this
  ** cannot be verified, return early.  */
  if( sqlite3_column_count(pSql)!=8 ){
    p->cMode = p->mode;
    return;
  }
  zSql = sqlite3_sql(pSql);
  if( zSql==0 ) return;
  for(z=zSql; *z==' ' || *z=='\t' || *z=='\n' || *z=='\f' || *z=='\r'; z++);
  if( sqlite3_strnicmp(z, "explain", 7) ){
    p->cMode = p->mode;
    return;
  }

  for(iOp=0; SQLITE_ROW==sqlite3_step(pSql); iOp++){
    int i;
    int iAddr = sqlite3_column_int(pSql, 0);
    const char *zOp = (const char*)sqlite3_column_text(pSql, 1);

    /* Set p2 to the P2 field of the current opcode. Then, assuming that
    ** p2 is an instruction address, set variable p2op to the index of that
    ** instruction in the aiIndent[] array. p2 and p2op may be different if
    ** the current instruction is part of a sub-program generated by an
    ** SQL trigger or foreign key.  */
    int p2 = sqlite3_column_int(pSql, 3);
    int p2op = (p2 + (iOp-iAddr));

    /* Grow the p->aiIndent array as required */
    if( iOp>=nAlloc ){
      if( iOp==0 ){
        /* Do further verfication that this is explain output.  Abort if
        ** it is not */
        static const char *explainCols[] = {
           "addr", "opcode", "p1", "p2", "p3", "p4", "p5", "comment" };
        int jj;
        for(jj=0; jj<ArraySize(explainCols); jj++){
          if( strcmp(sqlite3_column_name(pSql,jj),explainCols[jj])!=0 ){
            p->cMode = p->mode;
            sqlite3_reset(pSql);
            return;
          }
        }
      }
      nAlloc += 100;
      p->aiIndent = (int*)sqlite3_realloc64(p->aiIndent, nAlloc*sizeof(int));
      abYield = (int*)sqlite3_realloc64(abYield, nAlloc*sizeof(int));
    }
    abYield[iOp] = str_in_array(zOp, azYield);
    p->aiIndent[iOp] = 0;
    p->nIndent = iOp+1;
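The column-name check added above relies on ArraySize(), a small helper macro defined elsewhere in the shell source; the conventional definition, assumed here rather than shown in the diff, is:

    /* Number of elements in a statically sized array (assumed definition;
    ** the shell defines an equivalent macro outside this hunk). */
    #define ArraySize(X)  ((int)(sizeof(X)/sizeof(X[0])))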
Old version, lines 1515-1553:
        pArg->pStmt = pStmt;
        pArg->cnt = 0;
      }

      /* echo the sql statement if echo on */
      if( pArg && pArg->echoOn ){
        const char *zStmtSql = sqlite3_sql(pStmt);
        fprintf(pArg->out, "%s\n", zStmtSql ? zStmtSql : zSql);
      }

      /* Show the EXPLAIN QUERY PLAN if .eqp is on */
      if( pArg && pArg->autoEQP ){
        sqlite3_stmt *pExplain;
        char *zEQP = sqlite3_mprintf("EXPLAIN QUERY PLAN %s",
                                     sqlite3_sql(pStmt));
        rc = sqlite3_prepare_v2(db, zEQP, -1, &pExplain, 0);
        if( rc==SQLITE_OK ){
          while( sqlite3_step(pExplain)==SQLITE_ROW ){
            fprintf(pArg->out,"--EQP-- %d,", sqlite3_column_int(pExplain, 0));
            fprintf(pArg->out,"%d,", sqlite3_column_int(pExplain, 1));
            fprintf(pArg->out,"%d,", sqlite3_column_int(pExplain, 2));
            fprintf(pArg->out,"%s\n", sqlite3_column_text(pExplain, 3));
          }
        }
        sqlite3_finalize(pExplain);
        sqlite3_free(zEQP);
      }

      /* If the shell is currently in ".explain" mode, gather the extra
      ** data required to add indents to the output.*/
      if( pArg && pArg->mode==MODE_Explain ){
        explain_data_prepare(pArg, pStmt);

      }

      /* perform the first step.  this will tell us if we
      ** have a result set or not and how wide it is.
      */
      rc = sqlite3_step(pStmt);
      /* if we have a result set... */






New version, lines 1625-1673:
        pArg->pStmt = pStmt;
        pArg->cnt = 0;
      }

      /* echo the sql statement if echo on */
      if( pArg && pArg->echoOn ){
        const char *zStmtSql = sqlite3_sql(pStmt);
        utf8_printf(pArg->out, "%s\n", zStmtSql ? zStmtSql : zSql);
      }

      /* Show the EXPLAIN QUERY PLAN if .eqp is on */
      if( pArg && pArg->autoEQP ){
        sqlite3_stmt *pExplain;
        char *zEQP = sqlite3_mprintf("EXPLAIN QUERY PLAN %s",
                                     sqlite3_sql(pStmt));
        rc = sqlite3_prepare_v2(db, zEQP, -1, &pExplain, 0);
        if( rc==SQLITE_OK ){
          while( sqlite3_step(pExplain)==SQLITE_ROW ){
            raw_printf(pArg->out,"--EQP-- %d,",sqlite3_column_int(pExplain, 0));
            raw_printf(pArg->out,"%d,", sqlite3_column_int(pExplain, 1));
            raw_printf(pArg->out,"%d,", sqlite3_column_int(pExplain, 2));
            utf8_printf(pArg->out,"%s\n", sqlite3_column_text(pExplain, 3));
          }
        }
        sqlite3_finalize(pExplain);
        sqlite3_free(zEQP);
      }

      if( pArg ){
        pArg->cMode = pArg->mode;
        if( pArg->autoExplain
         && sqlite3_column_count(pStmt)==8
         && sqlite3_strlike("%EXPLAIN%", sqlite3_sql(pStmt),0)==0
        ){
          pArg->cMode = MODE_Explain;
        }
      
        /* If the shell is currently in ".explain" mode, gather the extra
        ** data required to add indents to the output.*/
        if( pArg->cMode==MODE_Explain ){
          explain_data_prepare(pArg, pStmt);
        }
      }

      /* perform the first step.  this will tell us if we
      ** have a result set or not and how wide it is.
      */
      rc = sqlite3_step(pStmt);
      /* if we have a result set... */
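The auto-explain test added above keys off sqlite3_strlike(), which returns zero when its second argument matches the LIKE pattern given as its first argument (the third argument is the optional escape character, 0 for none). A minimal illustration, using made-up statement text:

    #include <assert.h>
    #include "sqlite3.h"

    static void strlike_demo(void){
      /* Matches: the SQL contains "explain" and LIKE is case-insensitive
      ** for ASCII, so sqlite3_strlike() returns 0. */
      assert( sqlite3_strlike("%EXPLAIN%", "explain query plan select 1", 0)==0 );
      /* No match: an ordinary SELECT, so a non-zero value comes back. */
      assert( sqlite3_strlike("%EXPLAIN%", "SELECT 1", 0)!=0 );
    }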
Old version, lines 1569-1583:
            for(i=0; i<nCol; i++){
              azCols[i] = (char *)sqlite3_column_name(pStmt, i);
            }
            do{
              /* extract the data and data types */
              for(i=0; i<nCol; i++){
                aiTypes[i] = x = sqlite3_column_type(pStmt, i);
                if( x==SQLITE_BLOB && pArg && pArg->mode==MODE_Insert ){
                  azVals[i] = "";
                }else{
                  azVals[i] = (char*)sqlite3_column_text(pStmt, i);
                }
                if( !azVals[i] && (aiTypes[i]!=SQLITE_NULL) ){
                  rc = SQLITE_NOMEM;
                  break; /* from for */






New version, lines 1689-1703:
            for(i=0; i<nCol; i++){
              azCols[i] = (char *)sqlite3_column_name(pStmt, i);
            }
            do{
              /* extract the data and data types */
              for(i=0; i<nCol; i++){
                aiTypes[i] = x = sqlite3_column_type(pStmt, i);
                if( x==SQLITE_BLOB && pArg && pArg->cMode==MODE_Insert ){
                  azVals[i] = "";
                }else{
                  azVals[i] = (char*)sqlite3_column_text(pStmt, i);
                }
                if( !azVals[i] && (aiTypes[i]!=SQLITE_NULL) ){
                  rc = SQLITE_NOMEM;
                  break; /* from for */
Old version, lines 1657-1688:
  zTable = azArg[0];
  zType = azArg[1];
  zSql = azArg[2];
  
  if( strcmp(zTable, "sqlite_sequence")==0 ){
    zPrepStmt = "DELETE FROM sqlite_sequence;\n";
  }else if( sqlite3_strglob("sqlite_stat?", zTable)==0 ){
    fprintf(p->out, "ANALYZE sqlite_master;\n");
  }else if( strncmp(zTable, "sqlite_", 7)==0 ){
    return 0;
  }else if( strncmp(zSql, "CREATE VIRTUAL TABLE", 20)==0 ){
    char *zIns;
    if( !p->writableSchema ){
      fprintf(p->out, "PRAGMA writable_schema=ON;\n");
      p->writableSchema = 1;
    }
    zIns = sqlite3_mprintf(
       "INSERT INTO sqlite_master(type,name,tbl_name,rootpage,sql)"
       "VALUES('table','%q','%q',0,'%q');",
       zTable, zTable, zSql);
    fprintf(p->out, "%s\n", zIns);
    sqlite3_free(zIns);
    return 0;
  }else{
    fprintf(p->out, "%s;\n", zSql);
  }

  if( strcmp(zType, "table")==0 ){
    sqlite3_stmt *pTableInfo = 0;
    char *zSelect = 0;
    char *zTableInfo = 0;
    char *zTmp = 0;






New version, lines 1777-1808:
  zTable = azArg[0];
  zType = azArg[1];
  zSql = azArg[2];
  
  if( strcmp(zTable, "sqlite_sequence")==0 ){
    zPrepStmt = "DELETE FROM sqlite_sequence;\n";
  }else if( sqlite3_strglob("sqlite_stat?", zTable)==0 ){
    raw_printf(p->out, "ANALYZE sqlite_master;\n");
  }else if( strncmp(zTable, "sqlite_", 7)==0 ){
    return 0;
  }else if( strncmp(zSql, "CREATE VIRTUAL TABLE", 20)==0 ){
    char *zIns;
    if( !p->writableSchema ){
      raw_printf(p->out, "PRAGMA writable_schema=ON;\n");
      p->writableSchema = 1;
    }
    zIns = sqlite3_mprintf(
       "INSERT INTO sqlite_master(type,name,tbl_name,rootpage,sql)"
       "VALUES('table','%q','%q',0,'%q');",
       zTable, zTable, zSql);
    utf8_printf(p->out, "%s\n", zIns);
    sqlite3_free(zIns);
    return 0;
  }else{
    utf8_printf(p->out, "%s;\n", zSql);
  }

  if( strcmp(zType, "table")==0 ){
    sqlite3_stmt *pTableInfo = 0;
    char *zSelect = 0;
    char *zTableInfo = 0;
    char *zTmp = 0;
Old version, lines 1751-1803:
){
  int rc;
  char *zErr = 0;
  rc = sqlite3_exec(p->db, zQuery, dump_callback, p, &zErr);
  if( rc==SQLITE_CORRUPT ){
    char *zQ2;
    int len = strlen30(zQuery);
    fprintf(p->out, "/****** CORRUPTION ERROR *******/\n");
    if( zErr ){
      fprintf(p->out, "/****** %s ******/\n", zErr);
      sqlite3_free(zErr);
      zErr = 0;
    }
    zQ2 = malloc( len+100 );
    if( zQ2==0 ) return rc;
    sqlite3_snprintf(len+100, zQ2, "%s ORDER BY rowid DESC", zQuery);
    rc = sqlite3_exec(p->db, zQ2, dump_callback, p, &zErr);
    if( rc ){
      fprintf(p->out, "/****** ERROR: %s ******/\n", zErr);
    }else{
      rc = SQLITE_CORRUPT;
    }
    sqlite3_free(zErr);
    free(zQ2);
  }
  return rc;
}

/*
** Text of a help message
*/
static char zHelp[] =
  ".backup ?DB? FILE      Backup DB (default \"main\") to FILE\n"
  ".bail on|off           Stop after hitting an error.  Default OFF\n"
  ".binary on|off         Turn binary output on or off.  Default OFF\n"

  ".clone NEWDB           Clone data into NEWDB from the existing database\n"
  ".databases             List names and files of attached databases\n"
  ".dbinfo ?DB?           Show status information about the database\n"
  ".dump ?TABLE? ...      Dump the database in an SQL text format\n"
  "                         If TABLE specified, only dump tables matching\n"
  "                         LIKE pattern TABLE.\n"
  ".echo on|off           Turn command echo on or off\n"
  ".eqp on|off            Enable or disable automatic EXPLAIN QUERY PLAN\n"
  ".exit                  Exit this program\n"
  ".explain ?on|off?      Turn output mode suitable for EXPLAIN on or off.\n"
  "                         With no args, it turns EXPLAIN on.\n"
  ".fullschema            Show schema and the content of sqlite_stat tables\n"
  ".headers on|off        Turn display of headers on or off\n"
  ".help                  Show this message\n"
  ".import FILE TABLE     Import data from FILE into TABLE\n"
  ".indexes ?TABLE?       Show names of all indexes\n"
  "                         If TABLE specified, only show indexes for tables\n"
  "                         matching LIKE pattern TABLE.\n"






New version, lines 1871-1923:
){
  int rc;
  char *zErr = 0;
  rc = sqlite3_exec(p->db, zQuery, dump_callback, p, &zErr);
  if( rc==SQLITE_CORRUPT ){
    char *zQ2;
    int len = strlen30(zQuery);
    raw_printf(p->out, "/****** CORRUPTION ERROR *******/\n");
    if( zErr ){
      utf8_printf(p->out, "/****** %s ******/\n", zErr);
      sqlite3_free(zErr);
      zErr = 0;
    }
    zQ2 = malloc( len+100 );
    if( zQ2==0 ) return rc;
    sqlite3_snprintf(len+100, zQ2, "%s ORDER BY rowid DESC", zQuery);
    rc = sqlite3_exec(p->db, zQ2, dump_callback, p, &zErr);
    if( rc ){
      utf8_printf(p->out, "/****** ERROR: %s ******/\n", zErr);
    }else{
      rc = SQLITE_CORRUPT;
    }
    sqlite3_free(zErr);
    free(zQ2);
  }
  return rc;
}

/*
** Text of a help message
*/
static char zHelp[] =
  ".backup ?DB? FILE      Backup DB (default \"main\") to FILE\n"
  ".bail on|off           Stop after hitting an error.  Default OFF\n"
  ".binary on|off         Turn binary output on or off.  Default OFF\n"
  ".changes on|off        Show number of rows changed by SQL\n"
  ".clone NEWDB           Clone data into NEWDB from the existing database\n"
  ".databases             List names and files of attached databases\n"
  ".dbinfo ?DB?           Show status information about the database\n"
  ".dump ?TABLE? ...      Dump the database in an SQL text format\n"
  "                         If TABLE specified, only dump tables matching\n"
  "                         LIKE pattern TABLE.\n"
  ".echo on|off           Turn command echo on or off\n"
  ".eqp on|off            Enable or disable automatic EXPLAIN QUERY PLAN\n"
  ".exit                  Exit this program\n"
  ".explain ?on|off|auto? Turn EXPLAIN output mode on or off or to automatic\n"

  ".fullschema            Show schema and the content of sqlite_stat tables\n"
  ".headers on|off        Turn display of headers on or off\n"
  ".help                  Show this message\n"
  ".import FILE TABLE     Import data from FILE into TABLE\n"
  ".indexes ?TABLE?       Show names of all indexes\n"
  "                         If TABLE specified, only show indexes for tables\n"
  "                         matching LIKE pattern TABLE.\n"
Old version, lines 1841-1854:
  ".system CMD ARGS...    Run CMD ARGS... in a system shell\n"
  ".tables ?TABLE?        List names of tables\n"
  "                         If TABLE specified, only list tables matching\n"
  "                         LIKE pattern TABLE.\n"
  ".timeout MS            Try opening locked tables for MS milliseconds\n"
  ".timer on|off          Turn SQL timer on or off\n"
  ".trace FILE|off        Output each SQL statement as it is run\n"


  ".vfsname ?AUX?         Print the name of the VFS stack\n"
  ".width NUM1 NUM2 ...   Set column widths for \"column\" mode\n"
  "                         Negative values right-justify\n"
;

/* Forward reference */
static int process_input(ShellState *p, FILE *in);






New version, lines 1961-1976:
  ".system CMD ARGS...    Run CMD ARGS... in a system shell\n"
  ".tables ?TABLE?        List names of tables\n"
  "                         If TABLE specified, only list tables matching\n"
  "                         LIKE pattern TABLE.\n"
  ".timeout MS            Try opening locked tables for MS milliseconds\n"
  ".timer on|off          Turn SQL timer on or off\n"
  ".trace FILE|off        Output each SQL statement as it is run\n"
  ".vfsinfo ?AUX?         Information about the top-level VFS\n"
  ".vfslist               List all available VFSes\n"
  ".vfsname ?AUX?         Print the name of the VFS stack\n"
  ".width NUM1 NUM2 ...   Set column widths for \"column\" mode\n"
  "                         Negative values right-justify\n"
;

/* Forward reference */
static int process_input(ShellState *p, FILE *in);
Old version, lines 1925-1939:
    sqlite3_open(p->zDbFilename, &p->db);
    globalDb = p->db;
    if( p->db && sqlite3_errcode(p->db)==SQLITE_OK ){
      sqlite3_create_function(p->db, "shellstatic", 0, SQLITE_UTF8, 0,
          shellstaticFunc, 0, 0);
    }
    if( p->db==0 || SQLITE_OK!=sqlite3_errcode(p->db) ){
      fprintf(stderr,"Error: unable to open database \"%s\": %s\n", 
          p->zDbFilename, sqlite3_errmsg(p->db));
      if( keepAlive ) return;
      exit(1);
    }
#ifndef SQLITE_OMIT_LOAD_EXTENSION
    sqlite3_enable_load_extension(p->db, 1);
#endif






New version, lines 2047-2061:
    sqlite3_open(p->zDbFilename, &p->db);
    globalDb = p->db;
    if( p->db && sqlite3_errcode(p->db)==SQLITE_OK ){
      sqlite3_create_function(p->db, "shellstatic", 0, SQLITE_UTF8, 0,
          shellstaticFunc, 0, 0);
    }
    if( p->db==0 || SQLITE_OK!=sqlite3_errcode(p->db) ){
      utf8_printf(stderr,"Error: unable to open database \"%s\": %s\n", 
          p->zDbFilename, sqlite3_errmsg(p->db));
      if( keepAlive ) return;
      exit(1);
    }
#ifndef SQLITE_OMIT_LOAD_EXTENSION
    sqlite3_enable_load_extension(p->db, 1);
#endif
Old version, lines 2075-2089:
  if( i>0 && zArg[i]==0 ) return (int)(integerValue(zArg) & 0xffffffff);
  if( sqlite3_stricmp(zArg, "on")==0 || sqlite3_stricmp(zArg,"yes")==0 ){
    return 1;
  }
  if( sqlite3_stricmp(zArg, "off")==0 || sqlite3_stricmp(zArg,"no")==0 ){
    return 0;
  }
  fprintf(stderr, "ERROR: Not a boolean value: \"%s\". Assuming \"no\".\n",
          zArg);
  return 0;
}

/*
** Close an output file, assuming it is not stderr or stdout
*/






New version, lines 2197-2211:
  if( i>0 && zArg[i]==0 ) return (int)(integerValue(zArg) & 0xffffffff);
  if( sqlite3_stricmp(zArg, "on")==0 || sqlite3_stricmp(zArg,"yes")==0 ){
    return 1;
  }
  if( sqlite3_stricmp(zArg, "off")==0 || sqlite3_stricmp(zArg,"no")==0 ){
    return 0;
  }
  utf8_printf(stderr, "ERROR: Not a boolean value: \"%s\". Assuming \"no\".\n",
          zArg);
  return 0;
}

/*
** Close an output file, assuming it is not stderr or stdout
*/
Old version, lines 2103-2131:
  }else if( strcmp(zFile, "stderr")==0 ){
    f = stderr;
  }else if( strcmp(zFile, "off")==0 ){
    f = 0;
  }else{
    f = fopen(zFile, "wb");
    if( f==0 ){
      fprintf(stderr, "Error: cannot open \"%s\"\n", zFile);
    }
  }
  return f;
}

/*
** A routine for handling output from sqlite3_trace().
*/
static void sql_trace_callback(void *pArg, const char *z){
  FILE *f = (FILE*)pArg;
  if( f ){
    int i = (int)strlen(z);
    while( i>0 && z[i-1]==';' ){ i--; }
    fprintf(f, "%.*s;\n", i, z);
  }
}

/*
** A no-op routine that runs with the ".breakpoint" doc-command.  This is
** a useful spot to set a debugger breakpoint.
*/






New version, lines 2225-2253:
  }else if( strcmp(zFile, "stderr")==0 ){
    f = stderr;
  }else if( strcmp(zFile, "off")==0 ){
    f = 0;
  }else{
    f = fopen(zFile, "wb");
    if( f==0 ){
      utf8_printf(stderr, "Error: cannot open \"%s\"\n", zFile);
    }
  }
  return f;
}

/*
** A routine for handling output from sqlite3_trace().
*/
static void sql_trace_callback(void *pArg, const char *z){
  FILE *f = (FILE*)pArg;
  if( f ){
    int i = (int)strlen(z);
    while( i>0 && z[i-1]==';' ){ i--; }
    utf8_printf(f, "%.*s;\n", i, z);
  }
}

/*
** A no-op routine that runs with the ".breakpoint" doc-command.  This is
** a useful spot to set a debugger breakpoint.
*/
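The sql_trace_callback() routine shown in this hunk is the handler that the shell's ".trace" command installs on the database connection; registration is typically done with sqlite3_trace(), roughly as sketched below (the file name is only an example, not taken from the source):

    #include <stdio.h>
    #include "sqlite3.h"

    /* Example of attaching the trace callback above to a connection;
    ** "trace.log" is a hypothetical output file. */
    static void attach_trace(sqlite3 *db){
      FILE *f = fopen("trace.log", "wb");
      if( f ) sqlite3_trace(db, sql_trace_callback, f);
    }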
Old version, lines 2152-2166:
/* Append a single byte to z[] */
static void import_append_char(ImportCtx *p, int c){
  if( p->n+1>=p->nAlloc ){
    p->nAlloc += p->nAlloc + 100;
    p->z = sqlite3_realloc64(p->z, p->nAlloc);
    if( p->z==0 ){
      fprintf(stderr, "out of memory\n");
      exit(1);
    }
  }
  p->z[p->n++] = (char)c;
}

/* Read a single field of CSV text.  Compatible with rfc4180 and extended






New version, lines 2274-2288:
/* Append a single byte to z[] */
static void import_append_char(ImportCtx *p, int c){
  if( p->n+1>=p->nAlloc ){
    p->nAlloc += p->nAlloc + 100;
    p->z = sqlite3_realloc64(p->z, p->nAlloc);
    if( p->z==0 ){
      raw_printf(stderr, "out of memory\n");
      exit(1);
    }
  }
  p->z[p->n++] = (char)c;
}

/* Read a single field of CSV text.  Compatible with rfc4180 and extended
Old version, lines 2206-2224:
       || (c==EOF && pc==cQuote)
      ){
        do{ p->n--; }while( p->z[p->n]!=cQuote );
        p->cTerm = c;
        break;
      }
      if( pc==cQuote && c!='\r' ){
        fprintf(stderr, "%s:%d: unescaped %c character\n",
                p->zFile, p->nLine, cQuote);
      }
      if( c==EOF ){
        fprintf(stderr, "%s:%d: unterminated %c-quoted field\n",
                p->zFile, startLine, cQuote);
        p->cTerm = c;
        break;
      }
      import_append_char(p, c);
      ppc = pc;
      pc = c;






New version, lines 2328-2346:
       || (c==EOF && pc==cQuote)
      ){
        do{ p->n--; }while( p->z[p->n]!=cQuote );
        p->cTerm = c;
        break;
      }
      if( pc==cQuote && c!='\r' ){
        utf8_printf(stderr, "%s:%d: unescaped %c character\n",
                p->zFile, p->nLine, cQuote);
      }
      if( c==EOF ){
        utf8_printf(stderr, "%s:%d: unterminated %c-quoted field\n",
                p->zFile, startLine, cQuote);
        p->cTerm = c;
        break;
      }
      import_append_char(p, c);
      ppc = pc;
      pc = c;
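For reference, the two diagnostics printed above fire on inputs like the following (illustrative data only, not taken from the source):

    /* Illustrative CSV fragments for the two messages above. */
    static const char *zUnescaped =
        "\"ab\"c,next\n";   /* closing quote followed by 'c': "unescaped" warning */
    static const char *zUnterminated =
        "\"abc";            /* EOF while still inside the quotes: "unterminated" */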
Old version, lines 2292-2327:
  int k = 0;
  int cnt = 0;
  const int spinRate = 10000;

  zQuery = sqlite3_mprintf("SELECT * FROM \"%w\"", zTable);
  rc = sqlite3_prepare_v2(p->db, zQuery, -1, &pQuery, 0);
  if( rc ){
    fprintf(stderr, "Error %d: %s on [%s]\n",
            sqlite3_extended_errcode(p->db), sqlite3_errmsg(p->db),
            zQuery);
    goto end_data_xfer;
  }
  n = sqlite3_column_count(pQuery);
  zInsert = sqlite3_malloc64(200 + nTable + n*3);
  if( zInsert==0 ){
    fprintf(stderr, "out of memory\n");
    goto end_data_xfer;
  }
  sqlite3_snprintf(200+nTable,zInsert,
                   "INSERT OR IGNORE INTO \"%s\" VALUES(?", zTable);
  i = (int)strlen(zInsert);
  for(j=1; j<n; j++){
    memcpy(zInsert+i, ",?", 2);
    i += 2;
  }
  memcpy(zInsert+i, ");", 3);
  rc = sqlite3_prepare_v2(newDb, zInsert, -1, &pInsert, 0);
  if( rc ){
    fprintf(stderr, "Error %d: %s on [%s]\n",
            sqlite3_extended_errcode(newDb), sqlite3_errmsg(newDb),
            zQuery);
    goto end_data_xfer;
  }
  for(k=0; k<2; k++){
    while( (rc = sqlite3_step(pQuery))==SQLITE_ROW ){
      for(i=0; i<n; i++){






New version, lines 2414-2449:
  int k = 0;
  int cnt = 0;
  const int spinRate = 10000;

  zQuery = sqlite3_mprintf("SELECT * FROM \"%w\"", zTable);
  rc = sqlite3_prepare_v2(p->db, zQuery, -1, &pQuery, 0);
  if( rc ){
    utf8_printf(stderr, "Error %d: %s on [%s]\n",
            sqlite3_extended_errcode(p->db), sqlite3_errmsg(p->db),
            zQuery);
    goto end_data_xfer;
  }
  n = sqlite3_column_count(pQuery);
  zInsert = sqlite3_malloc64(200 + nTable + n*3);
  if( zInsert==0 ){
    raw_printf(stderr, "out of memory\n");
    goto end_data_xfer;
  }
  sqlite3_snprintf(200+nTable,zInsert,
                   "INSERT OR IGNORE INTO \"%s\" VALUES(?", zTable);
  i = (int)strlen(zInsert);
  for(j=1; j<n; j++){
    memcpy(zInsert+i, ",?", 2);
    i += 2;
  }
  memcpy(zInsert+i, ");", 3);
  rc = sqlite3_prepare_v2(newDb, zInsert, -1, &pInsert, 0);
  if( rc ){
    utf8_printf(stderr, "Error %d: %s on [%s]\n",
            sqlite3_extended_errcode(newDb), sqlite3_errmsg(newDb),
            zQuery);
    goto end_data_xfer;
  }
  for(k=0; k<2; k++){
    while( (rc = sqlite3_step(pQuery))==SQLITE_ROW ){
      for(i=0; i<n; i++){
Old version, lines 2350-2381:
                                            SQLITE_STATIC);
            break;
          }
        }
      } /* End for */
      rc = sqlite3_step(pInsert);
      if( rc!=SQLITE_OK && rc!=SQLITE_ROW && rc!=SQLITE_DONE ){
        fprintf(stderr, "Error %d: %s\n", sqlite3_extended_errcode(newDb),
                        sqlite3_errmsg(newDb));
      }
      sqlite3_reset(pInsert);
      cnt++;
      if( (cnt%spinRate)==0 ){
        printf("%c\b", "|/-\\"[(cnt/spinRate)%4]);
        fflush(stdout);
      }
    } /* End while */
    if( rc==SQLITE_DONE ) break;
    sqlite3_finalize(pQuery);
    sqlite3_free(zQuery);
    zQuery = sqlite3_mprintf("SELECT * FROM \"%w\" ORDER BY rowid DESC;",
                             zTable);
    rc = sqlite3_prepare_v2(p->db, zQuery, -1, &pQuery, 0);
    if( rc ){
      fprintf(stderr, "Warning: cannot step \"%s\" backwards", zTable);
      break;
    }
  } /* End for(k=0...) */

end_data_xfer:
  sqlite3_finalize(pQuery);
  sqlite3_finalize(pInsert);






New version, lines 2472-2503:
                                            SQLITE_STATIC);
            break;
          }
        }
      } /* End for */
      rc = sqlite3_step(pInsert);
      if( rc!=SQLITE_OK && rc!=SQLITE_ROW && rc!=SQLITE_DONE ){
        utf8_printf(stderr, "Error %d: %s\n", sqlite3_extended_errcode(newDb),
                        sqlite3_errmsg(newDb));
      }
      sqlite3_reset(pInsert);
      cnt++;
      if( (cnt%spinRate)==0 ){
        printf("%c\b", "|/-\\"[(cnt/spinRate)%4]);
        fflush(stdout);
      }
    } /* End while */
    if( rc==SQLITE_DONE ) break;
    sqlite3_finalize(pQuery);
    sqlite3_free(zQuery);
    zQuery = sqlite3_mprintf("SELECT * FROM \"%w\" ORDER BY rowid DESC;",
                             zTable);
    rc = sqlite3_prepare_v2(p->db, zQuery, -1, &pQuery, 0);
    if( rc ){
      utf8_printf(stderr, "Warning: cannot step \"%s\" backwards", zTable);
      break;
    }
  } /* End for(k=0...) */

end_data_xfer:
  sqlite3_finalize(pQuery);
  sqlite3_finalize(pInsert);
Old version, lines 2403-2455:
  const unsigned char *zSql;
  char *zErrMsg = 0;

  zQuery = sqlite3_mprintf("SELECT name, sql FROM sqlite_master"
                           " WHERE %s", zWhere);
  rc = sqlite3_prepare_v2(p->db, zQuery, -1, &pQuery, 0);
  if( rc ){
    fprintf(stderr, "Error: (%d) %s on [%s]\n",
                    sqlite3_extended_errcode(p->db), sqlite3_errmsg(p->db),
                    zQuery);
    goto end_schema_xfer;
  }
  while( (rc = sqlite3_step(pQuery))==SQLITE_ROW ){
    zName = sqlite3_column_text(pQuery, 0);
    zSql = sqlite3_column_text(pQuery, 1);
    printf("%s... ", zName); fflush(stdout);
    sqlite3_exec(newDb, (const char*)zSql, 0, 0, &zErrMsg);
    if( zErrMsg ){
      fprintf(stderr, "Error: %s\nSQL: [%s]\n", zErrMsg, zSql);
      sqlite3_free(zErrMsg);
      zErrMsg = 0;
    }
    if( xForEach ){
      xForEach(p, newDb, (const char*)zName);
    }
    printf("done\n");
  }
  if( rc!=SQLITE_DONE ){
    sqlite3_finalize(pQuery);
    sqlite3_free(zQuery);
    zQuery = sqlite3_mprintf("SELECT name, sql FROM sqlite_master"
                             " WHERE %s ORDER BY rowid DESC", zWhere);
    rc = sqlite3_prepare_v2(p->db, zQuery, -1, &pQuery, 0);
    if( rc ){
      fprintf(stderr, "Error: (%d) %s on [%s]\n",
                      sqlite3_extended_errcode(p->db), sqlite3_errmsg(p->db),
                      zQuery);
      goto end_schema_xfer;
    }
    while( (rc = sqlite3_step(pQuery))==SQLITE_ROW ){
      zName = sqlite3_column_text(pQuery, 0);
      zSql = sqlite3_column_text(pQuery, 1);
      printf("%s... ", zName); fflush(stdout);
      sqlite3_exec(newDb, (const char*)zSql, 0, 0, &zErrMsg);
      if( zErrMsg ){
        fprintf(stderr, "Error: %s\nSQL: [%s]\n", zErrMsg, zSql);
        sqlite3_free(zErrMsg);
        zErrMsg = 0;
      }
      if( xForEach ){
        xForEach(p, newDb, (const char*)zName);
      }
      printf("done\n");






New version, lines 2525-2577:
  const unsigned char *zSql;
  char *zErrMsg = 0;

  zQuery = sqlite3_mprintf("SELECT name, sql FROM sqlite_master"
                           " WHERE %s", zWhere);
  rc = sqlite3_prepare_v2(p->db, zQuery, -1, &pQuery, 0);
  if( rc ){
    utf8_printf(stderr, "Error: (%d) %s on [%s]\n",
                    sqlite3_extended_errcode(p->db), sqlite3_errmsg(p->db),
                    zQuery);
    goto end_schema_xfer;
  }
  while( (rc = sqlite3_step(pQuery))==SQLITE_ROW ){
    zName = sqlite3_column_text(pQuery, 0);
    zSql = sqlite3_column_text(pQuery, 1);
    printf("%s... ", zName); fflush(stdout);
    sqlite3_exec(newDb, (const char*)zSql, 0, 0, &zErrMsg);
    if( zErrMsg ){
      utf8_printf(stderr, "Error: %s\nSQL: [%s]\n", zErrMsg, zSql);
      sqlite3_free(zErrMsg);
      zErrMsg = 0;
    }
    if( xForEach ){
      xForEach(p, newDb, (const char*)zName);
    }
    printf("done\n");
  }
  if( rc!=SQLITE_DONE ){
    sqlite3_finalize(pQuery);
    sqlite3_free(zQuery);
    zQuery = sqlite3_mprintf("SELECT name, sql FROM sqlite_master"
                             " WHERE %s ORDER BY rowid DESC", zWhere);
    rc = sqlite3_prepare_v2(p->db, zQuery, -1, &pQuery, 0);
    if( rc ){
      utf8_printf(stderr, "Error: (%d) %s on [%s]\n",
                      sqlite3_extended_errcode(p->db), sqlite3_errmsg(p->db),
                      zQuery);
      goto end_schema_xfer;
    }
    while( (rc = sqlite3_step(pQuery))==SQLITE_ROW ){
      zName = sqlite3_column_text(pQuery, 0);
      zSql = sqlite3_column_text(pQuery, 1);
      printf("%s... ", zName); fflush(stdout);
      sqlite3_exec(newDb, (const char*)zSql, 0, 0, &zErrMsg);
      if( zErrMsg ){
        utf8_printf(stderr, "Error: %s\nSQL: [%s]\n", zErrMsg, zSql);
        sqlite3_free(zErrMsg);
        zErrMsg = 0;
      }
      if( xForEach ){
        xForEach(p, newDb, (const char*)zName);
      }
      printf("done\n");
Old version, lines 2465-2484:
** as possible out of the main database (which might be corrupt) and write it
** into zNewDb.
*/
static void tryToClone(ShellState *p, const char *zNewDb){
  int rc;
  sqlite3 *newDb = 0;
  if( access(zNewDb,0)==0 ){
    fprintf(stderr, "File \"%s\" already exists.\n", zNewDb);
    return;
  }
  rc = sqlite3_open(zNewDb, &newDb);
  if( rc ){
    fprintf(stderr, "Cannot create output database: %s\n",
            sqlite3_errmsg(newDb));
  }else{
    sqlite3_exec(p->db, "PRAGMA writable_schema=ON;", 0, 0, 0);
    sqlite3_exec(newDb, "BEGIN EXCLUSIVE;", 0, 0, 0);
    tryToCloneSchema(p, newDb, "type='table'", tryToCloneData);
    tryToCloneSchema(p, newDb, "type!='table'", 0);
    sqlite3_exec(newDb, "COMMIT;", 0, 0, 0);






New version, lines 2587-2606:
** as possible out of the main database (which might be corrupt) and write it
** into zNewDb.
*/
static void tryToClone(ShellState *p, const char *zNewDb){
  int rc;
  sqlite3 *newDb = 0;
  if( access(zNewDb,0)==0 ){
    utf8_printf(stderr, "File \"%s\" already exists.\n", zNewDb);
    return;
  }
  rc = sqlite3_open(zNewDb, &newDb);
  if( rc ){
    utf8_printf(stderr, "Cannot create output database: %s\n",
            sqlite3_errmsg(newDb));
  }else{
    sqlite3_exec(p->db, "PRAGMA writable_schema=ON;", 0, 0, 0);
    sqlite3_exec(newDb, "BEGIN EXCLUSIVE;", 0, 0, 0);
    tryToCloneSchema(p, newDb, "type='table'", tryToCloneData);
    tryToCloneSchema(p, newDb, "type!='table'", 0);
    sqlite3_exec(newDb, "COMMIT;", 0, 0, 0);
Old version, lines 2554-2619:
     { "number of triggers:",
       "SELECT count(*) FROM %s WHERE type='trigger'" },
     { "number of views:",
       "SELECT count(*) FROM %s WHERE type='view'" },
     { "schema size:",
       "SELECT total(length(sql)) FROM %s" },
  };
  sqlite3_file *pFile;
  int i;
  char *zSchemaTab;
  char *zDb = nArg>=2 ? azArg[1] : "main";
  unsigned char aHdr[100];
  open_db(p, 0);
  if( p->db==0 ) return 1;
  sqlite3_file_control(p->db, zDb, SQLITE_FCNTL_FILE_POINTER, &pFile);
  if( pFile==0 || pFile->pMethods==0 || pFile->pMethods->xRead==0 ){
    return 1;
  }
  i = pFile->pMethods->xRead(pFile, aHdr, 100, 0);
  if( i!=SQLITE_OK ){
    fprintf(stderr, "unable to read database header\n");
    return 1;
  }
  i = get2byteInt(aHdr+16);
  if( i==1 ) i = 65536;
  fprintf(p->out, "%-20s %d\n", "database page size:", i);
  fprintf(p->out, "%-20s %d\n", "write format:", aHdr[18]);
  fprintf(p->out, "%-20s %d\n", "read format:", aHdr[19]);
  fprintf(p->out, "%-20s %d\n", "reserved bytes:", aHdr[20]);
  for(i=0; i<ArraySize(aField); i++){
    int ofst = aField[i].ofst;
    unsigned int val = get4byteInt(aHdr + ofst);
    fprintf(p->out, "%-20s %u", aField[i].zName, val);
    switch( ofst ){
      case 56: {
        if( val==1 ) fprintf(p->out, " (utf8)"); 
        if( val==2 ) fprintf(p->out, " (utf16le)"); 
        if( val==3 ) fprintf(p->out, " (utf16be)"); 
      }
    }
    fprintf(p->out, "\n");
  }
  if( zDb==0 ){
    zSchemaTab = sqlite3_mprintf("main.sqlite_master");
  }else if( strcmp(zDb,"temp")==0 ){
    zSchemaTab = sqlite3_mprintf("%s", "sqlite_temp_master");
  }else{
    zSchemaTab = sqlite3_mprintf("\"%w\".sqlite_master", zDb);
  }
  for(i=0; i<ArraySize(aQuery); i++){
    char *zSql = sqlite3_mprintf(aQuery[i].zSql, zSchemaTab);
    int val = db_int(p, zSql);
    sqlite3_free(zSql);
    fprintf(p->out, "%-20s %d\n", aQuery[i].zName, val);
  }
  sqlite3_free(zSchemaTab);
  return 0;
}

/*
** If an input line begins with "." then invoke this routine to
** process that line.
**
** Return 1 on error, 2 to exit, and 0 otherwise.
*/






New version, lines 2676-2757:
     { "number of triggers:",
       "SELECT count(*) FROM %s WHERE type='trigger'" },
     { "number of views:",
       "SELECT count(*) FROM %s WHERE type='view'" },
     { "schema size:",
       "SELECT total(length(sql)) FROM %s" },
  };
  sqlite3_file *pFile = 0;
  int i;
  char *zSchemaTab;
  char *zDb = nArg>=2 ? azArg[1] : "main";
  unsigned char aHdr[100];
  open_db(p, 0);
  if( p->db==0 ) return 1;
  sqlite3_file_control(p->db, zDb, SQLITE_FCNTL_FILE_POINTER, &pFile);
  if( pFile==0 || pFile->pMethods==0 || pFile->pMethods->xRead==0 ){
    return 1;
  }
  i = pFile->pMethods->xRead(pFile, aHdr, 100, 0);
  if( i!=SQLITE_OK ){
    raw_printf(stderr, "unable to read database header\n");
    return 1;
  }
  i = get2byteInt(aHdr+16);
  if( i==1 ) i = 65536;
  utf8_printf(p->out, "%-20s %d\n", "database page size:", i);
  utf8_printf(p->out, "%-20s %d\n", "write format:", aHdr[18]);
  utf8_printf(p->out, "%-20s %d\n", "read format:", aHdr[19]);
  utf8_printf(p->out, "%-20s %d\n", "reserved bytes:", aHdr[20]);
  for(i=0; i<ArraySize(aField); i++){
    int ofst = aField[i].ofst;
    unsigned int val = get4byteInt(aHdr + ofst);
    utf8_printf(p->out, "%-20s %u", aField[i].zName, val);
    switch( ofst ){
      case 56: {
        if( val==1 ) raw_printf(p->out, " (utf8)"); 
        if( val==2 ) raw_printf(p->out, " (utf16le)"); 
        if( val==3 ) raw_printf(p->out, " (utf16be)"); 
      }
    }
    raw_printf(p->out, "\n");
  }
  if( zDb==0 ){
    zSchemaTab = sqlite3_mprintf("main.sqlite_master");
  }else if( strcmp(zDb,"temp")==0 ){
    zSchemaTab = sqlite3_mprintf("%s", "sqlite_temp_master");
  }else{
    zSchemaTab = sqlite3_mprintf("\"%w\".sqlite_master", zDb);
  }
  for(i=0; i<ArraySize(aQuery); i++){
    char *zSql = sqlite3_mprintf(aQuery[i].zSql, zSchemaTab);
    int val = db_int(p, zSql);
    sqlite3_free(zSql);
    utf8_printf(p->out, "%-20s %d\n", aQuery[i].zName, val);
  }
  sqlite3_free(zSchemaTab);
  return 0;
}

/*
** Print the current sqlite3_errmsg() value to stderr and return 1.
*/
static int shellDatabaseError(sqlite3 *db){
  const char *zErr = sqlite3_errmsg(db);
  utf8_printf(stderr, "Error: %s\n", zErr);
  return 1;
}

/*
** Print an out-of-memory message to stderr and return 1.
*/
static int shellNomemError(void){
  raw_printf(stderr, "Error: out of memory\n");
  return 1;
}

/*
** If an input line begins with "." then invoke this routine to
** process that line.
**
** Return 1 on error, 2 to exit, and 0 otherwise.
*/
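The ".dbinfo" code above decodes the big-endian integers of the 100-byte database header with get2byteInt() and get4byteInt(), both defined elsewhere in the file; bodies consistent with how they are called here (assumed, not quoted from the source) would be:

    /* Decode big-endian 16-bit and 32-bit database-header fields. */
    static unsigned int get2byteInt(unsigned char *a){
      return (a[0]<<8) + a[1];
    }
    static unsigned int get4byteInt(unsigned char *a){
      return (a[0]<<24) + (a[1]<<16) + (a[2]<<8) + a[3];
    }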
Old version, lines 2663-2791:
    int j;
    for(j=1; j<nArg; j++){
      const char *z = azArg[j];
      if( z[0]=='-' ){
        while( z[0]=='-' ) z++;
        /* No options to process at this time */
        {
          fprintf(stderr, "unknown option: %s\n", azArg[j]);
          return 1;
        }
      }else if( zDestFile==0 ){
        zDestFile = azArg[j];
      }else if( zDb==0 ){
        zDb = zDestFile;
        zDestFile = azArg[j];
      }else{
        fprintf(stderr, "too many arguments to .backup\n");
        return 1;
      }
    }
    if( zDestFile==0 ){
      fprintf(stderr, "missing FILENAME argument on .backup\n");
      return 1;
    }
    if( zDb==0 ) zDb = "main";
    rc = sqlite3_open(zDestFile, &pDest);
    if( rc!=SQLITE_OK ){
      fprintf(stderr, "Error: cannot open \"%s\"\n", zDestFile);
      sqlite3_close(pDest);
      return 1;
    }
    open_db(p, 0);
    pBackup = sqlite3_backup_init(pDest, "main", p->db, zDb);
    if( pBackup==0 ){
      fprintf(stderr, "Error: %s\n", sqlite3_errmsg(pDest));
      sqlite3_close(pDest);
      return 1;
    }
    while(  (rc = sqlite3_backup_step(pBackup,100))==SQLITE_OK ){}
    sqlite3_backup_finish(pBackup);
    if( rc==SQLITE_DONE ){
      rc = 0;
    }else{
      fprintf(stderr, "Error: %s\n", sqlite3_errmsg(pDest));
      rc = 1;
    }
    sqlite3_close(pDest);
  }else

  if( c=='b' && n>=3 && strncmp(azArg[0], "bail", n)==0 ){
    if( nArg==2 ){
      bail_on_error = booleanValue(azArg[1]);
    }else{
      fprintf(stderr, "Usage: .bail on|off\n");
      rc = 1;
    }
  }else

  if( c=='b' && n>=3 && strncmp(azArg[0], "binary", n)==0 ){
    if( nArg==2 ){
      if( booleanValue(azArg[1]) ){
        setBinaryMode(p->out);
      }else{
        setTextMode(p->out);
      }
    }else{
      fprintf(stderr, "Usage: .binary on|off\n");
      rc = 1;
    }
  }else

  /* The undocumented ".breakpoint" command causes a call to the no-op
  ** routine named test_breakpoint().
  */
  if( c=='b' && n>=3 && strncmp(azArg[0], "breakpoint", n)==0 ){
    test_breakpoint();
  }else

  if( c=='c' && strncmp(azArg[0], "clone", n)==0 ){
    if( nArg==2 ){
      tryToClone(p, azArg[1]);
    }else{
      fprintf(stderr, "Usage: .clone FILENAME\n");
      rc = 1;
    }
  }else

  if( c=='d' && n>1 && strncmp(azArg[0], "databases", n)==0 ){
    ShellState data;
    char *zErrMsg = 0;
    open_db(p, 0);
    memcpy(&data, p, sizeof(data));
    data.showHeader = 1;
    data.mode = MODE_Column;
    data.colWidth[0] = 3;
    data.colWidth[1] = 15;
    data.colWidth[2] = 58;
    data.cnt = 0;
    sqlite3_exec(p->db, "PRAGMA database_list; ", callback, &data, &zErrMsg);
    if( zErrMsg ){
      fprintf(stderr,"Error: %s\n", zErrMsg);
      sqlite3_free(zErrMsg);
      rc = 1;
    }
  }else

  if( c=='d' && strncmp(azArg[0], "dbinfo", n)==0 ){
    rc = shell_dbinfo_command(p, nArg, azArg);
  }else

  if( c=='d' && strncmp(azArg[0], "dump", n)==0 ){
    open_db(p, 0);
    /* When playing back a "dump", the content might appear in an order
    ** which causes immediate foreign key constraints to be violated.
    ** So disable foreign-key constraint enforcement to prevent problems. */
    if( nArg!=1 && nArg!=2 ){
      fprintf(stderr, "Usage: .dump ?LIKE-PATTERN?\n");
      rc = 1;
      goto meta_command_exit;
    }
    fprintf(p->out, "PRAGMA foreign_keys=OFF;\n");
    fprintf(p->out, "BEGIN TRANSACTION;\n");
    p->writableSchema = 0;
    sqlite3_exec(p->db, "SAVEPOINT dump; PRAGMA writable_schema=ON", 0, 0, 0);
    p->nErr = 0;
    if( nArg==1 ){
      run_schema_dump_query(p, 
        "SELECT name, type, sql FROM sqlite_master "
        "WHERE sql NOT NULL AND type=='table' AND name!='sqlite_sequence'"






New code (lines 2801-2938):

    int j;
    for(j=1; j<nArg; j++){
      const char *z = azArg[j];
      if( z[0]=='-' ){
        while( z[0]=='-' ) z++;
        /* No options to process at this time */
        {
          utf8_printf(stderr, "unknown option: %s\n", azArg[j]);
          return 1;
        }
      }else if( zDestFile==0 ){
        zDestFile = azArg[j];
      }else if( zDb==0 ){
        zDb = zDestFile;
        zDestFile = azArg[j];
      }else{
        raw_printf(stderr, "too many arguments to .backup\n");
        return 1;
      }
    }
    if( zDestFile==0 ){
      raw_printf(stderr, "missing FILENAME argument on .backup\n");
      return 1;
    }
    if( zDb==0 ) zDb = "main";
    rc = sqlite3_open(zDestFile, &pDest);
    if( rc!=SQLITE_OK ){
      utf8_printf(stderr, "Error: cannot open \"%s\"\n", zDestFile);
      sqlite3_close(pDest);
      return 1;
    }
    open_db(p, 0);
    pBackup = sqlite3_backup_init(pDest, "main", p->db, zDb);
    if( pBackup==0 ){
      utf8_printf(stderr, "Error: %s\n", sqlite3_errmsg(pDest));
      sqlite3_close(pDest);
      return 1;
    }
    while(  (rc = sqlite3_backup_step(pBackup,100))==SQLITE_OK ){}
    sqlite3_backup_finish(pBackup);
    if( rc==SQLITE_DONE ){
      rc = 0;
    }else{
      utf8_printf(stderr, "Error: %s\n", sqlite3_errmsg(pDest));
      rc = 1;
    }
    sqlite3_close(pDest);
  }else

  if( c=='b' && n>=3 && strncmp(azArg[0], "bail", n)==0 ){
    if( nArg==2 ){
      bail_on_error = booleanValue(azArg[1]);
    }else{
      raw_printf(stderr, "Usage: .bail on|off\n");
      rc = 1;
    }
  }else

  if( c=='b' && n>=3 && strncmp(azArg[0], "binary", n)==0 ){
    if( nArg==2 ){
      if( booleanValue(azArg[1]) ){
        setBinaryMode(p->out);
      }else{
        setTextMode(p->out);
      }
    }else{
      raw_printf(stderr, "Usage: .binary on|off\n");
      rc = 1;
    }
  }else

  /* The undocumented ".breakpoint" command causes a call to the no-op
  ** routine named test_breakpoint().
  */
  if( c=='b' && n>=3 && strncmp(azArg[0], "breakpoint", n)==0 ){
    test_breakpoint();
  }else

  if( c=='c' && n>=3 && strncmp(azArg[0], "changes", n)==0 ){
    if( nArg==2 ){
      p->countChanges = booleanValue(azArg[1]);
    }else{
      raw_printf(stderr, "Usage: .changes on|off\n");
      rc = 1;
    }
  }else

  if( c=='c' && strncmp(azArg[0], "clone", n)==0 ){
    if( nArg==2 ){
      tryToClone(p, azArg[1]);
    }else{
      raw_printf(stderr, "Usage: .clone FILENAME\n");
      rc = 1;
    }
  }else

  if( c=='d' && n>1 && strncmp(azArg[0], "databases", n)==0 ){
    ShellState data;
    char *zErrMsg = 0;
    open_db(p, 0);
    memcpy(&data, p, sizeof(data));
    data.showHeader = 1;
    data.cMode = data.mode = MODE_Column;
    data.colWidth[0] = 3;
    data.colWidth[1] = 15;
    data.colWidth[2] = 58;
    data.cnt = 0;
    sqlite3_exec(p->db, "PRAGMA database_list; ", callback, &data, &zErrMsg);
    if( zErrMsg ){
      utf8_printf(stderr,"Error: %s\n", zErrMsg);
      sqlite3_free(zErrMsg);
      rc = 1;
    }
  }else

  if( c=='d' && strncmp(azArg[0], "dbinfo", n)==0 ){
    rc = shell_dbinfo_command(p, nArg, azArg);
  }else

  if( c=='d' && strncmp(azArg[0], "dump", n)==0 ){
    open_db(p, 0);
    /* When playing back a "dump", the content might appear in an order
    ** which causes immediate foreign key constraints to be violated.
    ** So disable foreign-key constraint enforcement to prevent problems. */
    if( nArg!=1 && nArg!=2 ){
      raw_printf(stderr, "Usage: .dump ?LIKE-PATTERN?\n");
      rc = 1;
      goto meta_command_exit;
    }
    raw_printf(p->out, "PRAGMA foreign_keys=OFF;\n");
    raw_printf(p->out, "BEGIN TRANSACTION;\n");
    p->writableSchema = 0;
    sqlite3_exec(p->db, "SAVEPOINT dump; PRAGMA writable_schema=ON", 0, 0, 0);
    p->nErr = 0;
    if( nArg==1 ){
      run_schema_dump_query(p, 
        "SELECT name, type, sql FROM sqlite_master "
        "WHERE sql NOT NULL AND type=='table' AND name!='sqlite_sequence'"

Old code (lines 2812-3097):

          "  AND type IN ('index','trigger','view')"
          "  AND tbl_name LIKE shellstatic()", 0
        );
        zShellStatic = 0;
      }
    }
    if( p->writableSchema ){
      fprintf(p->out, "PRAGMA writable_schema=OFF;\n");
      p->writableSchema = 0;
    }
    sqlite3_exec(p->db, "PRAGMA writable_schema=OFF;", 0, 0, 0);
    sqlite3_exec(p->db, "RELEASE dump;", 0, 0, 0);
    fprintf(p->out, p->nErr ? "ROLLBACK; -- due to errors\n" : "COMMIT;\n");
  }else

  if( c=='e' && strncmp(azArg[0], "echo", n)==0 ){
    if( nArg==2 ){
      p->echoOn = booleanValue(azArg[1]);
    }else{
      fprintf(stderr, "Usage: .echo on|off\n");
      rc = 1;
    }
  }else

  if( c=='e' && strncmp(azArg[0], "eqp", n)==0 ){
    if( nArg==2 ){
      p->autoEQP = booleanValue(azArg[1]);
    }else{
      fprintf(stderr, "Usage: .eqp on|off\n");
      rc = 1;
    }   
  }else

  if( c=='e' && strncmp(azArg[0], "exit", n)==0 ){
    if( nArg>1 && (rc = (int)integerValue(azArg[1]))!=0 ) exit(rc);
    rc = 2;
  }else

  if( c=='e' && strncmp(azArg[0], "explain", n)==0 ){
    int val = nArg>=2 ? booleanValue(azArg[1]) : 1;
    if(val == 1) {
      if(!p->normalMode.valid) {
        p->normalMode.valid = 1;
        p->normalMode.mode = p->mode;
        p->normalMode.showHeader = p->showHeader;
        memcpy(p->normalMode.colWidth,p->colWidth,sizeof(p->colWidth));
      }
      /* We could put this code under the !p->explainValid
      ** condition so that it does not execute if we are already in
      ** explain mode. However, always executing it allows us an easy
      ** way to reset to explain mode in case the user previously
      ** did an .explain followed by a .width, .mode or .header
      ** command.
      */
      p->mode = MODE_Explain;
      p->showHeader = 1;
      memset(p->colWidth,0,sizeof(p->colWidth));
      p->colWidth[0] = 4;                  /* addr */
      p->colWidth[1] = 13;                 /* opcode */
      p->colWidth[2] = 4;                  /* P1 */
      p->colWidth[3] = 4;                  /* P2 */
      p->colWidth[4] = 4;                  /* P3 */
      p->colWidth[5] = 13;                 /* P4 */
      p->colWidth[6] = 2;                  /* P5 */
      p->colWidth[7] = 13;                  /* Comment */
    }else if (p->normalMode.valid) {
      p->normalMode.valid = 0;
      p->mode = p->normalMode.mode;
      p->showHeader = p->normalMode.showHeader;
      memcpy(p->colWidth,p->normalMode.colWidth,sizeof(p->colWidth));
    }
  }else

  if( c=='f' && strncmp(azArg[0], "fullschema", n)==0 ){
    ShellState data;
    char *zErrMsg = 0;
    int doStats = 0;
    if( nArg!=1 ){
      fprintf(stderr, "Usage: .fullschema\n");
      rc = 1;
      goto meta_command_exit;
    }
    open_db(p, 0);
    memcpy(&data, p, sizeof(data));
    data.showHeader = 0;
    data.mode = MODE_Semi;
    rc = sqlite3_exec(p->db,
       "SELECT sql FROM"
       "  (SELECT sql sql, type type, tbl_name tbl_name, name name, rowid x"
       "     FROM sqlite_master UNION ALL"
       "   SELECT sql, type, tbl_name, name, rowid FROM sqlite_temp_master) "
       "WHERE type!='meta' AND sql NOTNULL AND name NOT LIKE 'sqlite_%' "
       "ORDER BY rowid",
       callback, &data, &zErrMsg
    );
    if( rc==SQLITE_OK ){
      sqlite3_stmt *pStmt;
      rc = sqlite3_prepare_v2(p->db,
               "SELECT rowid FROM sqlite_master"
               " WHERE name GLOB 'sqlite_stat[134]'",
               -1, &pStmt, 0);
      doStats = sqlite3_step(pStmt)==SQLITE_ROW;
      sqlite3_finalize(pStmt);
    }
    if( doStats==0 ){
      fprintf(p->out, "/* No STAT tables available */\n");
    }else{
      fprintf(p->out, "ANALYZE sqlite_master;\n");
      sqlite3_exec(p->db, "SELECT 'ANALYZE sqlite_master'",
                   callback, &data, &zErrMsg);
      data.mode = MODE_Insert;
      data.zDestTable = "sqlite_stat1";
      shell_exec(p->db, "SELECT * FROM sqlite_stat1",
                 shell_callback, &data,&zErrMsg);
      data.zDestTable = "sqlite_stat3";
      shell_exec(p->db, "SELECT * FROM sqlite_stat3",
                 shell_callback, &data,&zErrMsg);
      data.zDestTable = "sqlite_stat4";
      shell_exec(p->db, "SELECT * FROM sqlite_stat4",
                 shell_callback, &data, &zErrMsg);
      fprintf(p->out, "ANALYZE sqlite_master;\n");
    }
  }else

  if( c=='h' && strncmp(azArg[0], "headers", n)==0 ){
    if( nArg==2 ){
      p->showHeader = booleanValue(azArg[1]);
    }else{
      fprintf(stderr, "Usage: .headers on|off\n");
      rc = 1;
    }
  }else

  if( c=='h' && strncmp(azArg[0], "help", n)==0 ){
    fprintf(p->out, "%s", zHelp);
  }else

  if( c=='i' && strncmp(azArg[0], "import", n)==0 ){
    char *zTable;               /* Insert data into this table */
    char *zFile;                /* Name of file to extract content from */
    sqlite3_stmt *pStmt = NULL; /* A statement */
    int nCol;                   /* Number of columns in the table */
    int nByte;                  /* Number of bytes in an SQL string */
    int i, j;                   /* Loop counters */
    int needCommit;             /* True to COMMIT or ROLLBACK at end */
    int nSep;                   /* Number of bytes in p->colSeparator[] */
    char *zSql;                 /* An SQL statement */
    ImportCtx sCtx;             /* Reader context */
    char *(SQLITE_CDECL *xRead)(ImportCtx*); /* Func to read one value */
    int (SQLITE_CDECL *xCloser)(FILE*);      /* Func to close file */

    if( nArg!=3 ){
      fprintf(stderr, "Usage: .import FILE TABLE\n");
      goto meta_command_exit;
    }
    zFile = azArg[1];
    zTable = azArg[2];
    seenInterrupt = 0;
    memset(&sCtx, 0, sizeof(sCtx));
    open_db(p, 0);
    nSep = strlen30(p->colSeparator);
    if( nSep==0 ){
      fprintf(stderr, "Error: non-null column separator required for import\n");
      return 1;
    }
    if( nSep>1 ){
      fprintf(stderr, "Error: multi-character column separators not allowed"
                      " for import\n");
      return 1;
    }
    nSep = strlen30(p->rowSeparator);
    if( nSep==0 ){
      fprintf(stderr, "Error: non-null row separator required for import\n");
      return 1;
    }
    if( nSep==2 && p->mode==MODE_Csv && strcmp(p->rowSeparator, SEP_CrLf)==0 ){
      /* When importing CSV (only), if the row separator is set to the
      ** default output row separator, change it to the default input
      ** row separator.  This avoids having to maintain different input
      ** and output row separators. */
      sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator, SEP_Row);
      nSep = strlen30(p->rowSeparator);
    }
    if( nSep>1 ){
      fprintf(stderr, "Error: multi-character row separators not allowed"
                      " for import\n");
      return 1;
    }
    sCtx.zFile = zFile;
    sCtx.nLine = 1;
    if( sCtx.zFile[0]=='|' ){
#ifdef SQLITE_OMIT_POPEN
      fprintf(stderr, "Error: pipes are not supported in this OS\n");
      return 1;
#else
      sCtx.in = popen(sCtx.zFile+1, "r");
      sCtx.zFile = "<pipe>";
      xCloser = pclose;
#endif
    }else{
      sCtx.in = fopen(sCtx.zFile, "rb");
      xCloser = fclose;
    }
    if( p->mode==MODE_Ascii ){
      xRead = ascii_read_one_field;
    }else{
      xRead = csv_read_one_field;
    }
    if( sCtx.in==0 ){
      fprintf(stderr, "Error: cannot open \"%s\"\n", zFile);
      return 1;
    }
    sCtx.cColSep = p->colSeparator[0];
    sCtx.cRowSep = p->rowSeparator[0];
    zSql = sqlite3_mprintf("SELECT * FROM %s", zTable);
    if( zSql==0 ){
      fprintf(stderr, "Error: out of memory\n");
      xCloser(sCtx.in);
      return 1;
    }
    nByte = strlen30(zSql);
    rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
    import_append_char(&sCtx, 0);    /* To ensure sCtx.z is allocated */
    if( rc && sqlite3_strglob("no such table: *", sqlite3_errmsg(p->db))==0 ){
      char *zCreate = sqlite3_mprintf("CREATE TABLE %s", zTable);
      char cSep = '(';
      while( xRead(&sCtx) ){
        zCreate = sqlite3_mprintf("%z%c\n  \"%s\" TEXT", zCreate, cSep, sCtx.z);
        cSep = ',';
        if( sCtx.cTerm!=sCtx.cColSep ) break;
      }
      if( cSep=='(' ){
        sqlite3_free(zCreate);
        sqlite3_free(sCtx.z);
        xCloser(sCtx.in);
        fprintf(stderr,"%s: empty file\n", sCtx.zFile);
        return 1;
      }
      zCreate = sqlite3_mprintf("%z\n)", zCreate);
      rc = sqlite3_exec(p->db, zCreate, 0, 0, 0);
      sqlite3_free(zCreate);
      if( rc ){
        fprintf(stderr, "CREATE TABLE %s(...) failed: %s\n", zTable,
                sqlite3_errmsg(p->db));
        sqlite3_free(sCtx.z);
        xCloser(sCtx.in);
        return 1;
      }
      rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
    }
    sqlite3_free(zSql);
    if( rc ){
      if (pStmt) sqlite3_finalize(pStmt);
      fprintf(stderr,"Error: %s\n", sqlite3_errmsg(p->db));
      xCloser(sCtx.in);
      return 1;
    }
    nCol = sqlite3_column_count(pStmt);
    sqlite3_finalize(pStmt);
    pStmt = 0;
    if( nCol==0 ) return 0; /* no columns, no error */
    zSql = sqlite3_malloc64( nByte*2 + 20 + nCol*2 );
    if( zSql==0 ){
      fprintf(stderr, "Error: out of memory\n");
      xCloser(sCtx.in);
      return 1;
    }
    sqlite3_snprintf(nByte+20, zSql, "INSERT INTO \"%w\" VALUES(?", zTable);
    j = strlen30(zSql);
    for(i=1; i<nCol; i++){
      zSql[j++] = ',';
      zSql[j++] = '?';
    }
    zSql[j++] = ')';
    zSql[j] = 0;
    rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
    sqlite3_free(zSql);
    if( rc ){
      fprintf(stderr, "Error: %s\n", sqlite3_errmsg(p->db));
      if (pStmt) sqlite3_finalize(pStmt);
      xCloser(sCtx.in);
      return 1;
    }
    needCommit = sqlite3_get_autocommit(p->db);
    if( needCommit ) sqlite3_exec(p->db, "BEGIN", 0, 0, 0);
    do{






New code (lines 2959-3232):

          "  AND type IN ('index','trigger','view')"
          "  AND tbl_name LIKE shellstatic()", 0
        );
        zShellStatic = 0;
      }
    }
    if( p->writableSchema ){
      raw_printf(p->out, "PRAGMA writable_schema=OFF;\n");
      p->writableSchema = 0;
    }
    sqlite3_exec(p->db, "PRAGMA writable_schema=OFF;", 0, 0, 0);
    sqlite3_exec(p->db, "RELEASE dump;", 0, 0, 0);
    raw_printf(p->out, p->nErr ? "ROLLBACK; -- due to errors\n" : "COMMIT;\n");
  }else

  if( c=='e' && strncmp(azArg[0], "echo", n)==0 ){
    if( nArg==2 ){
      p->echoOn = booleanValue(azArg[1]);
    }else{
      raw_printf(stderr, "Usage: .echo on|off\n");
      rc = 1;
    }
  }else

  if( c=='e' && strncmp(azArg[0], "eqp", n)==0 ){
    if( nArg==2 ){
      p->autoEQP = booleanValue(azArg[1]);
    }else{
      raw_printf(stderr, "Usage: .eqp on|off\n");
      rc = 1;
    }   
  }else

  if( c=='e' && strncmp(azArg[0], "exit", n)==0 ){
    if( nArg>1 && (rc = (int)integerValue(azArg[1]))!=0 ) exit(rc);
    rc = 2;
  }else

  if( c=='e' && strncmp(azArg[0], "explain", n)==0 ){
    int val = 1;
    if( nArg>=2 ){
      if( strcmp(azArg[1],"auto")==0 ){
        val = 99;
      }else{
        val =  booleanValue(azArg[1]);
      }
    }
    if( val==1 && p->mode!=MODE_Explain ){
      p->normalMode = p->mode;
      p->mode = MODE_Explain;
      p->autoExplain = 0;
    }else if( val==0 ){
      if( p->mode==MODE_Explain ) p->mode = p->normalMode;
      p->autoExplain = 0;
    }else if( val==99 ){
      if( p->mode==MODE_Explain ) p->mode = p->normalMode;
      p->autoExplain = 1;
    }
  }else

  if( c=='f' && strncmp(azArg[0], "fullschema", n)==0 ){
    ShellState data;
    char *zErrMsg = 0;
    int doStats = 0;
    if( nArg!=1 ){
      raw_printf(stderr, "Usage: .fullschema\n");
      rc = 1;
      goto meta_command_exit;
    }
    open_db(p, 0);
    memcpy(&data, p, sizeof(data));
    data.showHeader = 0;
    data.cMode = data.mode = MODE_Semi;
    rc = sqlite3_exec(p->db,
       "SELECT sql FROM"
       "  (SELECT sql sql, type type, tbl_name tbl_name, name name, rowid x"
       "     FROM sqlite_master UNION ALL"
       "   SELECT sql, type, tbl_name, name, rowid FROM sqlite_temp_master) "
       "WHERE type!='meta' AND sql NOTNULL AND name NOT LIKE 'sqlite_%' "
       "ORDER BY rowid",
       callback, &data, &zErrMsg
    );
    if( rc==SQLITE_OK ){
      sqlite3_stmt *pStmt;
      rc = sqlite3_prepare_v2(p->db,
               "SELECT rowid FROM sqlite_master"
               " WHERE name GLOB 'sqlite_stat[134]'",
               -1, &pStmt, 0);
      doStats = sqlite3_step(pStmt)==SQLITE_ROW;
      sqlite3_finalize(pStmt);
    }
    if( doStats==0 ){
      raw_printf(p->out, "/* No STAT tables available */\n");
    }else{
      raw_printf(p->out, "ANALYZE sqlite_master;\n");
      sqlite3_exec(p->db, "SELECT 'ANALYZE sqlite_master'",
                   callback, &data, &zErrMsg);
      data.cMode = data.mode = MODE_Insert;
      data.zDestTable = "sqlite_stat1";
      shell_exec(p->db, "SELECT * FROM sqlite_stat1",
                 shell_callback, &data,&zErrMsg);
      data.zDestTable = "sqlite_stat3";
      shell_exec(p->db, "SELECT * FROM sqlite_stat3",
                 shell_callback, &data,&zErrMsg);
      data.zDestTable = "sqlite_stat4";
      shell_exec(p->db, "SELECT * FROM sqlite_stat4",
                 shell_callback, &data, &zErrMsg);
      raw_printf(p->out, "ANALYZE sqlite_master;\n");
    }
  }else

  if( c=='h' && strncmp(azArg[0], "headers", n)==0 ){
    if( nArg==2 ){
      p->showHeader = booleanValue(azArg[1]);
    }else{
      raw_printf(stderr, "Usage: .headers on|off\n");
      rc = 1;
    }
  }else

  if( c=='h' && strncmp(azArg[0], "help", n)==0 ){
    utf8_printf(p->out, "%s", zHelp);
  }else

  if( c=='i' && strncmp(azArg[0], "import", n)==0 ){
    char *zTable;               /* Insert data into this table */
    char *zFile;                /* Name of file to extract content from */
    sqlite3_stmt *pStmt = NULL; /* A statement */
    int nCol;                   /* Number of columns in the table */
    int nByte;                  /* Number of bytes in an SQL string */
    int i, j;                   /* Loop counters */
    int needCommit;             /* True to COMMIT or ROLLBACK at end */
    int nSep;                   /* Number of bytes in p->colSeparator[] */
    char *zSql;                 /* An SQL statement */
    ImportCtx sCtx;             /* Reader context */
    char *(SQLITE_CDECL *xRead)(ImportCtx*); /* Func to read one value */
    int (SQLITE_CDECL *xCloser)(FILE*);      /* Func to close file */

    if( nArg!=3 ){
      raw_printf(stderr, "Usage: .import FILE TABLE\n");
      goto meta_command_exit;
    }
    zFile = azArg[1];
    zTable = azArg[2];
    seenInterrupt = 0;
    memset(&sCtx, 0, sizeof(sCtx));
    open_db(p, 0);
    nSep = strlen30(p->colSeparator);
    if( nSep==0 ){
      raw_printf(stderr,
                 "Error: non-null column separator required for import\n");
      return 1;
    }
    if( nSep>1 ){
      raw_printf(stderr, "Error: multi-character column separators not allowed"
                      " for import\n");
      return 1;
    }
    nSep = strlen30(p->rowSeparator);
    if( nSep==0 ){
      raw_printf(stderr, "Error: non-null row separator required for import\n");
      return 1;
    }
    if( nSep==2 && p->mode==MODE_Csv && strcmp(p->rowSeparator, SEP_CrLf)==0 ){
      /* When importing CSV (only), if the row separator is set to the
      ** default output row separator, change it to the default input
      ** row separator.  This avoids having to maintain different input
      ** and output row separators. */
      sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator, SEP_Row);
      nSep = strlen30(p->rowSeparator);
    }
    if( nSep>1 ){
      raw_printf(stderr, "Error: multi-character row separators not allowed"
                      " for import\n");
      return 1;
    }
    sCtx.zFile = zFile;
    sCtx.nLine = 1;
    if( sCtx.zFile[0]=='|' ){
#ifdef SQLITE_OMIT_POPEN
      raw_printf(stderr, "Error: pipes are not supported in this OS\n");
      return 1;
#else
      sCtx.in = popen(sCtx.zFile+1, "r");
      sCtx.zFile = "<pipe>";
      xCloser = pclose;
#endif
    }else{
      sCtx.in = fopen(sCtx.zFile, "rb");
      xCloser = fclose;
    }
    if( p->mode==MODE_Ascii ){
      xRead = ascii_read_one_field;
    }else{
      xRead = csv_read_one_field;
    }
    if( sCtx.in==0 ){
      utf8_printf(stderr, "Error: cannot open \"%s\"\n", zFile);
      return 1;
    }
    sCtx.cColSep = p->colSeparator[0];
    sCtx.cRowSep = p->rowSeparator[0];
    zSql = sqlite3_mprintf("SELECT * FROM %s", zTable);
    if( zSql==0 ){
      raw_printf(stderr, "Error: out of memory\n");
      xCloser(sCtx.in);
      return 1;
    }
    nByte = strlen30(zSql);
    rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
    import_append_char(&sCtx, 0);    /* To ensure sCtx.z is allocated */
    if( rc && sqlite3_strglob("no such table: *", sqlite3_errmsg(p->db))==0 ){
      char *zCreate = sqlite3_mprintf("CREATE TABLE %s", zTable);
      char cSep = '(';
      while( xRead(&sCtx) ){
        zCreate = sqlite3_mprintf("%z%c\n  \"%s\" TEXT", zCreate, cSep, sCtx.z);
        cSep = ',';
        if( sCtx.cTerm!=sCtx.cColSep ) break;
      }
      if( cSep=='(' ){
        sqlite3_free(zCreate);
        sqlite3_free(sCtx.z);
        xCloser(sCtx.in);
        utf8_printf(stderr,"%s: empty file\n", sCtx.zFile);
        return 1;
      }
      zCreate = sqlite3_mprintf("%z\n)", zCreate);
      rc = sqlite3_exec(p->db, zCreate, 0, 0, 0);
      sqlite3_free(zCreate);
      if( rc ){
        utf8_printf(stderr, "CREATE TABLE %s(...) failed: %s\n", zTable,
                sqlite3_errmsg(p->db));
        sqlite3_free(sCtx.z);
        xCloser(sCtx.in);
        return 1;
      }
      rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
    }
    sqlite3_free(zSql);
    if( rc ){
      if (pStmt) sqlite3_finalize(pStmt);
      utf8_printf(stderr,"Error: %s\n", sqlite3_errmsg(p->db));
      xCloser(sCtx.in);
      return 1;
    }
    nCol = sqlite3_column_count(pStmt);
    sqlite3_finalize(pStmt);
    pStmt = 0;
    if( nCol==0 ) return 0; /* no columns, no error */
    zSql = sqlite3_malloc64( nByte*2 + 20 + nCol*2 );
    if( zSql==0 ){
      raw_printf(stderr, "Error: out of memory\n");
      xCloser(sCtx.in);
      return 1;
    }
    sqlite3_snprintf(nByte+20, zSql, "INSERT INTO \"%w\" VALUES(?", zTable);
    j = strlen30(zSql);
    for(i=1; i<nCol; i++){
      zSql[j++] = ',';
      zSql[j++] = '?';
    }
    zSql[j++] = ')';
    zSql[j] = 0;
    rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
    sqlite3_free(zSql);
    if( rc ){
      utf8_printf(stderr, "Error: %s\n", sqlite3_errmsg(p->db));
      if (pStmt) sqlite3_finalize(pStmt);
      xCloser(sCtx.in);
      return 1;
    }
    needCommit = sqlite3_get_autocommit(p->db);
    if( needCommit ) sqlite3_exec(p->db, "BEGIN", 0, 0, 0);
    do{

Old code (lines 3107-3160):

        ** Did we reach end-of-file OR end-of-line before finding any
        ** columns in ASCII mode?  If so, stop instead of NULL filling
        ** the remaining columns.
        */
        if( p->mode==MODE_Ascii && (z==0 || z[0]==0) && i==0 ) break;
        sqlite3_bind_text(pStmt, i+1, z, -1, SQLITE_TRANSIENT);
        if( i<nCol-1 && sCtx.cTerm!=sCtx.cColSep ){
          fprintf(stderr, "%s:%d: expected %d columns but found %d - "
                          "filling the rest with NULL\n",
                          sCtx.zFile, startLine, nCol, i+1);
          i += 2;
          while( i<=nCol ){ sqlite3_bind_null(pStmt, i); i++; }
        }
      }
      if( sCtx.cTerm==sCtx.cColSep ){
        do{
          xRead(&sCtx);
          i++;
        }while( sCtx.cTerm==sCtx.cColSep );
        fprintf(stderr, "%s:%d: expected %d columns but found %d - "
                        "extras ignored\n",
                        sCtx.zFile, startLine, nCol, i);
      }
      if( i>=nCol ){
        sqlite3_step(pStmt);
        rc = sqlite3_reset(pStmt);
        if( rc!=SQLITE_OK ){
          fprintf(stderr, "%s:%d: INSERT failed: %s\n", sCtx.zFile, startLine,
                  sqlite3_errmsg(p->db));
        }
      }
    }while( sCtx.cTerm!=EOF );

    xCloser(sCtx.in);
    sqlite3_free(sCtx.z);
    sqlite3_finalize(pStmt);
    if( needCommit ) sqlite3_exec(p->db, "COMMIT", 0, 0, 0);
  }else

  if( c=='i' && (strncmp(azArg[0], "indices", n)==0
                 || strncmp(azArg[0], "indexes", n)==0) ){
    ShellState data;
    char *zErrMsg = 0;
    open_db(p, 0);
    memcpy(&data, p, sizeof(data));
    data.showHeader = 0;
    data.mode = MODE_List;
    if( nArg==1 ){
      rc = sqlite3_exec(p->db,
        "SELECT name FROM sqlite_master "
        "WHERE type='index' AND name NOT LIKE 'sqlite_%' "
        "UNION ALL "
        "SELECT name FROM sqlite_temp_master "
        "WHERE type='index' "






New code (lines 3242-3295):

        ** Did we reach end-of-file OR end-of-line before finding any
        ** columns in ASCII mode?  If so, stop instead of NULL filling
        ** the remaining columns.
        */
        if( p->mode==MODE_Ascii && (z==0 || z[0]==0) && i==0 ) break;
        sqlite3_bind_text(pStmt, i+1, z, -1, SQLITE_TRANSIENT);
        if( i<nCol-1 && sCtx.cTerm!=sCtx.cColSep ){
          utf8_printf(stderr, "%s:%d: expected %d columns but found %d - "
                          "filling the rest with NULL\n",
                          sCtx.zFile, startLine, nCol, i+1);
          i += 2;
          while( i<=nCol ){ sqlite3_bind_null(pStmt, i); i++; }
        }
      }
      if( sCtx.cTerm==sCtx.cColSep ){
        do{
          xRead(&sCtx);
          i++;
        }while( sCtx.cTerm==sCtx.cColSep );
        utf8_printf(stderr, "%s:%d: expected %d columns but found %d - "
                        "extras ignored\n",
                        sCtx.zFile, startLine, nCol, i);
      }
      if( i>=nCol ){
        sqlite3_step(pStmt);
        rc = sqlite3_reset(pStmt);
        if( rc!=SQLITE_OK ){
          utf8_printf(stderr, "%s:%d: INSERT failed: %s\n", sCtx.zFile,
                      startLine, sqlite3_errmsg(p->db));
        }
      }
    }while( sCtx.cTerm!=EOF );

    xCloser(sCtx.in);
    sqlite3_free(sCtx.z);
    sqlite3_finalize(pStmt);
    if( needCommit ) sqlite3_exec(p->db, "COMMIT", 0, 0, 0);
  }else

  if( c=='i' && (strncmp(azArg[0], "indices", n)==0
                 || strncmp(azArg[0], "indexes", n)==0) ){
    ShellState data;
    char *zErrMsg = 0;
    open_db(p, 0);
    memcpy(&data, p, sizeof(data));
    data.showHeader = 0;
    data.cMode = data.mode = MODE_List;
    if( nArg==1 ){
      rc = sqlite3_exec(p->db,
        "SELECT name FROM sqlite_master "
        "WHERE type='index' AND name NOT LIKE 'sqlite_%' "
        "UNION ALL "
        "SELECT name FROM sqlite_temp_master "
        "WHERE type='index' "

Old code (lines 3170-3211):

        "SELECT name FROM sqlite_temp_master "
        "WHERE type='index' AND tbl_name LIKE shellstatic() "
        "ORDER BY 1",
        callback, &data, &zErrMsg
      );
      zShellStatic = 0;
    }else{
      fprintf(stderr, "Usage: .indexes ?LIKE-PATTERN?\n");
      rc = 1;
      goto meta_command_exit;
    }
    if( zErrMsg ){
      fprintf(stderr,"Error: %s\n", zErrMsg);
      sqlite3_free(zErrMsg);
      rc = 1;
    }else if( rc != SQLITE_OK ){
      fprintf(stderr,"Error: querying sqlite_master and sqlite_temp_master\n");
      rc = 1;
    }
  }else

#ifdef SQLITE_ENABLE_IOTRACE
  if( c=='i' && strncmp(azArg[0], "iotrace", n)==0 ){
    SQLITE_API extern void (SQLITE_CDECL *sqlite3IoTrace)(const char*, ...);
    if( iotrace && iotrace!=stdout ) fclose(iotrace);
    iotrace = 0;
    if( nArg<2 ){
      sqlite3IoTrace = 0;
    }else if( strcmp(azArg[1], "-")==0 ){
      sqlite3IoTrace = iotracePrintf;
      iotrace = stdout;
    }else{
      iotrace = fopen(azArg[1], "w");
      if( iotrace==0 ){
        fprintf(stderr, "Error: cannot open \"%s\"\n", azArg[1]);
        sqlite3IoTrace = 0;
        rc = 1;
      }else{
        sqlite3IoTrace = iotracePrintf;
      }
    }
  }else






New code (lines 3305-3347):

        "SELECT name FROM sqlite_temp_master "
        "WHERE type='index' AND tbl_name LIKE shellstatic() "
        "ORDER BY 1",
        callback, &data, &zErrMsg
      );
      zShellStatic = 0;
    }else{
      raw_printf(stderr, "Usage: .indexes ?LIKE-PATTERN?\n");
      rc = 1;
      goto meta_command_exit;
    }
    if( zErrMsg ){
      utf8_printf(stderr,"Error: %s\n", zErrMsg);
      sqlite3_free(zErrMsg);
      rc = 1;
    }else if( rc != SQLITE_OK ){
      raw_printf(stderr,
                 "Error: querying sqlite_master and sqlite_temp_master\n");
      rc = 1;
    }
  }else

#ifdef SQLITE_ENABLE_IOTRACE
  if( c=='i' && strncmp(azArg[0], "iotrace", n)==0 ){
    SQLITE_API extern void (SQLITE_CDECL *sqlite3IoTrace)(const char*, ...);
    if( iotrace && iotrace!=stdout ) fclose(iotrace);
    iotrace = 0;
    if( nArg<2 ){
      sqlite3IoTrace = 0;
    }else if( strcmp(azArg[1], "-")==0 ){
      sqlite3IoTrace = iotracePrintf;
      iotrace = stdout;
    }else{
      iotrace = fopen(azArg[1], "w");
      if( iotrace==0 ){
        utf8_printf(stderr, "Error: cannot open \"%s\"\n", azArg[1]);
        sqlite3IoTrace = 0;
        rc = 1;
      }else{
        sqlite3IoTrace = iotracePrintf;
      }
    }
  }else

Old code (lines 3232-3302):

    open_db(p, 0);
    if( nArg==1 ){
      for(i=0; i<ArraySize(aLimit); i++){
        printf("%20s %d\n", aLimit[i].zLimitName, 
               sqlite3_limit(p->db, aLimit[i].limitCode, -1));
      }
    }else if( nArg>3 ){
      fprintf(stderr, "Usage: .limit NAME ?NEW-VALUE?\n");
      rc = 1;
      goto meta_command_exit;
    }else{
      int iLimit = -1;
      n2 = strlen30(azArg[1]);
      for(i=0; i<ArraySize(aLimit); i++){
        if( sqlite3_strnicmp(aLimit[i].zLimitName, azArg[1], n2)==0 ){
          if( iLimit<0 ){
            iLimit = i;
          }else{
            fprintf(stderr, "ambiguous limit: \"%s\"\n", azArg[1]);
            rc = 1;
            goto meta_command_exit;
          }
        }
      }
      if( iLimit<0 ){
        fprintf(stderr, "unknown limit: \"%s\"\n"
                        "enter \".limits\" with no arguments for a list.\n",
                         azArg[1]);
        rc = 1;
        goto meta_command_exit;
      }
      if( nArg==3 ){
        sqlite3_limit(p->db, aLimit[iLimit].limitCode,
                      (int)integerValue(azArg[2]));
      }
      printf("%20s %d\n", aLimit[iLimit].zLimitName,
             sqlite3_limit(p->db, aLimit[iLimit].limitCode, -1));
    }
  }else

#ifndef SQLITE_OMIT_LOAD_EXTENSION
  if( c=='l' && strncmp(azArg[0], "load", n)==0 ){
    const char *zFile, *zProc;
    char *zErrMsg = 0;
    if( nArg<2 ){
      fprintf(stderr, "Usage: .load FILE ?ENTRYPOINT?\n");
      rc = 1;
      goto meta_command_exit;
    }
    zFile = azArg[1];
    zProc = nArg>=3 ? azArg[2] : 0;
    open_db(p, 0);
    rc = sqlite3_load_extension(p->db, zFile, zProc, &zErrMsg);
    if( rc!=SQLITE_OK ){
      fprintf(stderr, "Error: %s\n", zErrMsg);
      sqlite3_free(zErrMsg);
      rc = 1;
    }
  }else
#endif

  if( c=='l' && strncmp(azArg[0], "log", n)==0 ){
    if( nArg!=2 ){
      fprintf(stderr, "Usage: .log FILENAME\n");
      rc = 1;
    }else{
      const char *zFile = azArg[1];
      output_file_close(p->pLog);
      p->pLog = output_file_open(zFile);
    }
  }else






New code (lines 3368-3438):

    open_db(p, 0);
    if( nArg==1 ){
      for(i=0; i<ArraySize(aLimit); i++){
        printf("%20s %d\n", aLimit[i].zLimitName, 
               sqlite3_limit(p->db, aLimit[i].limitCode, -1));
      }
    }else if( nArg>3 ){
      raw_printf(stderr, "Usage: .limit NAME ?NEW-VALUE?\n");
      rc = 1;
      goto meta_command_exit;
    }else{
      int iLimit = -1;
      n2 = strlen30(azArg[1]);
      for(i=0; i<ArraySize(aLimit); i++){
        if( sqlite3_strnicmp(aLimit[i].zLimitName, azArg[1], n2)==0 ){
          if( iLimit<0 ){
            iLimit = i;
          }else{
            utf8_printf(stderr, "ambiguous limit: \"%s\"\n", azArg[1]);
            rc = 1;
            goto meta_command_exit;
          }
        }
      }
      if( iLimit<0 ){
        utf8_printf(stderr, "unknown limit: \"%s\"\n"
                        "enter \".limits\" with no arguments for a list.\n",
                         azArg[1]);
        rc = 1;
        goto meta_command_exit;
      }
      if( nArg==3 ){
        sqlite3_limit(p->db, aLimit[iLimit].limitCode,
                      (int)integerValue(azArg[2]));
      }
      printf("%20s %d\n", aLimit[iLimit].zLimitName,
             sqlite3_limit(p->db, aLimit[iLimit].limitCode, -1));
    }
  }else

#ifndef SQLITE_OMIT_LOAD_EXTENSION
  if( c=='l' && strncmp(azArg[0], "load", n)==0 ){
    const char *zFile, *zProc;
    char *zErrMsg = 0;
    if( nArg<2 ){
      raw_printf(stderr, "Usage: .load FILE ?ENTRYPOINT?\n");
      rc = 1;
      goto meta_command_exit;
    }
    zFile = azArg[1];
    zProc = nArg>=3 ? azArg[2] : 0;
    open_db(p, 0);
    rc = sqlite3_load_extension(p->db, zFile, zProc, &zErrMsg);
    if( rc!=SQLITE_OK ){
      utf8_printf(stderr, "Error: %s\n", zErrMsg);
      sqlite3_free(zErrMsg);
      rc = 1;
    }
  }else
#endif

  if( c=='l' && strncmp(azArg[0], "log", n)==0 ){
    if( nArg!=2 ){
      raw_printf(stderr, "Usage: .log FILENAME\n");
      rc = 1;
    }else{
      const char *zFile = azArg[1];
      output_file_close(p->pLog);
      p->pLog = output_file_open(zFile);
    }
  }else
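
For reference, the sqlite3_limit() calls behind .limit above follow the documented convention that a negative new value only reports the current limit, while a non-negative value installs a new limit and returns the prior one. A minimal sketch, assuming nothing beyond the public API and an in-memory database (link with -lsqlite3); it is not part of this check-in:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  int cur, prev;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  /* ".limit NAME" with no value: pass -1 to read without changing */
  cur = sqlite3_limit(db, SQLITE_LIMIT_VARIABLE_NUMBER, -1);
  printf("variable_number currently %d\n", cur);
  /* ".limit NAME VALUE": pass the new value; the prior one is returned */
  prev = sqlite3_limit(db, SQLITE_LIMIT_VARIABLE_NUMBER, 100);
  printf("was %d, now %d\n", prev,
         sqlite3_limit(db, SQLITE_LIMIT_VARIABLE_NUMBER, -1));
  sqlite3_close(db);
  return 0;
}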

Old code (lines 3327-3352):

      p->mode = MODE_Insert;
      set_table_name(p, nArg>=3 ? azArg[2] : "table");
    }else if( c2=='a' && strncmp(azArg[1],"ascii",n2)==0 ){
      p->mode = MODE_Ascii;
      sqlite3_snprintf(sizeof(p->colSeparator), p->colSeparator, SEP_Unit);
      sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator, SEP_Record);
    }else {
      fprintf(stderr,"Error: mode should be one of: "
         "ascii column csv html insert line list tabs tcl\n");
      rc = 1;
    }
  }else

  if( c=='n' && strncmp(azArg[0], "nullvalue", n)==0 ){
    if( nArg==2 ){
      sqlite3_snprintf(sizeof(p->nullValue), p->nullValue,
                       "%.*s", (int)ArraySize(p->nullValue)-1, azArg[1]);
    }else{
      fprintf(stderr, "Usage: .nullvalue STRING\n");
      rc = 1;
    }
  }else

  if( c=='o' && strncmp(azArg[0], "open", n)==0 && n>=2 ){
    sqlite3 *savedDb = p->db;
    const char *zSavedFilename = p->zDbFilename;






New code (lines 3463-3489):

      p->mode = MODE_Insert;
      set_table_name(p, nArg>=3 ? azArg[2] : "table");
    }else if( c2=='a' && strncmp(azArg[1],"ascii",n2)==0 ){
      p->mode = MODE_Ascii;
      sqlite3_snprintf(sizeof(p->colSeparator), p->colSeparator, SEP_Unit);
      sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator, SEP_Record);
    }else {
      raw_printf(stderr, "Error: mode should be one of: "
         "ascii column csv html insert line list tabs tcl\n");
      rc = 1;
    }
    p->cMode = p->mode;
  }else

  if( c=='n' && strncmp(azArg[0], "nullvalue", n)==0 ){
    if( nArg==2 ){
      sqlite3_snprintf(sizeof(p->nullValue), p->nullValue,
                       "%.*s", (int)ArraySize(p->nullValue)-1, azArg[1]);
    }else{
      raw_printf(stderr, "Usage: .nullvalue STRING\n");
      rc = 1;
    }
  }else

  if( c=='o' && strncmp(azArg[0], "open", n)==0 && n>=2 ){
    sqlite3 *savedDb = p->db;
    const char *zSavedFilename = p->zDbFilename;

Old code (lines 3367-3456):

  }else

  if( c=='o'
   && (strncmp(azArg[0], "output", n)==0 || strncmp(azArg[0], "once", n)==0)
  ){
    const char *zFile = nArg>=2 ? azArg[1] : "stdout";
    if( nArg>2 ){
      fprintf(stderr, "Usage: .%s FILE\n", azArg[0]);
      rc = 1;
      goto meta_command_exit;
    }
    if( n>1 && strncmp(azArg[0], "once", n)==0 ){
      if( nArg<2 ){
        fprintf(stderr, "Usage: .once FILE\n");
        rc = 1;
        goto meta_command_exit;
      }
      p->outCount = 2;
    }else{
      p->outCount = 0;
    }
    output_reset(p);
    if( zFile[0]=='|' ){
#ifdef SQLITE_OMIT_POPEN
      fprintf(stderr,"Error: pipes are not supported in this OS\n");
      rc = 1;
      p->out = stdout;
#else
      p->out = popen(zFile + 1, "w");
      if( p->out==0 ){
        fprintf(stderr,"Error: cannot open pipe \"%s\"\n", zFile + 1);
        p->out = stdout;
        rc = 1;
      }else{
        sqlite3_snprintf(sizeof(p->outfile), p->outfile, "%s", zFile);
      }
#endif
    }else{
      p->out = output_file_open(zFile);
      if( p->out==0 ){
        if( strcmp(zFile,"off")!=0 ){
          fprintf(stderr,"Error: cannot write to \"%s\"\n", zFile);
        }
        p->out = stdout;
        rc = 1;
      } else {
        sqlite3_snprintf(sizeof(p->outfile), p->outfile, "%s", zFile);
      }
    }
  }else

  if( c=='p' && n>=3 && strncmp(azArg[0], "print", n)==0 ){
    int i;
    for(i=1; i<nArg; i++){
      if( i>1 ) fprintf(p->out, " ");
      fprintf(p->out, "%s", azArg[i]);
    }
    fprintf(p->out, "\n");
  }else

  if( c=='p' && strncmp(azArg[0], "prompt", n)==0 ){
    if( nArg >= 2) {
      strncpy(mainPrompt,azArg[1],(int)ArraySize(mainPrompt)-1);
    }
    if( nArg >= 3) {
      strncpy(continuePrompt,azArg[2],(int)ArraySize(continuePrompt)-1);
    }
  }else

  if( c=='q' && strncmp(azArg[0], "quit", n)==0 ){
    rc = 2;
  }else

  if( c=='r' && n>=3 && strncmp(azArg[0], "read", n)==0 ){
    FILE *alt;
    if( nArg!=2 ){
      fprintf(stderr, "Usage: .read FILE\n");
      rc = 1;
      goto meta_command_exit;
    }
    alt = fopen(azArg[1], "rb");
    if( alt==0 ){
      fprintf(stderr,"Error: cannot open \"%s\"\n", azArg[1]);
      rc = 1;
    }else{
      rc = process_input(p, alt);
      fclose(alt);
    }
  }else







New code (lines 3504-3593):

  }else

  if( c=='o'
   && (strncmp(azArg[0], "output", n)==0 || strncmp(azArg[0], "once", n)==0)
  ){
    const char *zFile = nArg>=2 ? azArg[1] : "stdout";
    if( nArg>2 ){
      utf8_printf(stderr, "Usage: .%s FILE\n", azArg[0]);
      rc = 1;
      goto meta_command_exit;
    }
    if( n>1 && strncmp(azArg[0], "once", n)==0 ){
      if( nArg<2 ){
        raw_printf(stderr, "Usage: .once FILE\n");
        rc = 1;
        goto meta_command_exit;
      }
      p->outCount = 2;
    }else{
      p->outCount = 0;
    }
    output_reset(p);
    if( zFile[0]=='|' ){
#ifdef SQLITE_OMIT_POPEN
      raw_printf(stderr, "Error: pipes are not supported in this OS\n");
      rc = 1;
      p->out = stdout;
#else
      p->out = popen(zFile + 1, "w");
      if( p->out==0 ){
        utf8_printf(stderr,"Error: cannot open pipe \"%s\"\n", zFile + 1);
        p->out = stdout;
        rc = 1;
      }else{
        sqlite3_snprintf(sizeof(p->outfile), p->outfile, "%s", zFile);
      }
#endif
    }else{
      p->out = output_file_open(zFile);
      if( p->out==0 ){
        if( strcmp(zFile,"off")!=0 ){
          utf8_printf(stderr,"Error: cannot write to \"%s\"\n", zFile);
        }
        p->out = stdout;
        rc = 1;
      } else {
        sqlite3_snprintf(sizeof(p->outfile), p->outfile, "%s", zFile);
      }
    }
  }else

  if( c=='p' && n>=3 && strncmp(azArg[0], "print", n)==0 ){
    int i;
    for(i=1; i<nArg; i++){
      if( i>1 ) raw_printf(p->out, " ");
      utf8_printf(p->out, "%s", azArg[i]);
    }
    raw_printf(p->out, "\n");
  }else

  if( c=='p' && strncmp(azArg[0], "prompt", n)==0 ){
    if( nArg >= 2) {
      strncpy(mainPrompt,azArg[1],(int)ArraySize(mainPrompt)-1);
    }
    if( nArg >= 3) {
      strncpy(continuePrompt,azArg[2],(int)ArraySize(continuePrompt)-1);
    }
  }else

  if( c=='q' && strncmp(azArg[0], "quit", n)==0 ){
    rc = 2;
  }else

  if( c=='r' && n>=3 && strncmp(azArg[0], "read", n)==0 ){
    FILE *alt;
    if( nArg!=2 ){
      raw_printf(stderr, "Usage: .read FILE\n");
      rc = 1;
      goto meta_command_exit;
    }
    alt = fopen(azArg[1], "rb");
    if( alt==0 ){
      utf8_printf(stderr,"Error: cannot open \"%s\"\n", azArg[1]);
      rc = 1;
    }else{
      rc = process_input(p, alt);
      fclose(alt);
    }
  }else
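
The .restore handler in the next hunk, like .backup earlier, drives SQLite's online backup API: sqlite3_backup_init() pairs a destination and a source database, sqlite3_backup_step() copies pages in batches, and sqlite3_backup_finish() releases the handle. A minimal standalone sketch of that pattern, with ad-hoc error handling that is not the shell's (link with -lsqlite3):

#include <stdio.h>
#include <sqlite3.h>

int main(int argc, char **argv){
  sqlite3 *pSrc = 0, *pDest = 0;
  sqlite3_backup *pBackup;
  int rc = SQLITE_ERROR;
  if( argc!=3 ){
    fprintf(stderr, "usage: %s SOURCE-DB DEST-DB\n", argv[0]);
    return 1;
  }
  if( sqlite3_open(argv[1], &pSrc)==SQLITE_OK
   && sqlite3_open(argv[2], &pDest)==SQLITE_OK
   && (pBackup = sqlite3_backup_init(pDest, "main", pSrc, "main"))!=0 ){
    /* copy 100 pages per step, as the shell does; -1 copies all at once */
    while( (rc = sqlite3_backup_step(pBackup, 100))==SQLITE_OK ){}
    sqlite3_backup_finish(pBackup);
  }
  if( rc!=SQLITE_DONE ) fprintf(stderr, "backup failed\n");
  sqlite3_close(pSrc);
  sqlite3_close(pDest);
  return rc==SQLITE_DONE ? 0 : 1;
}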


Old code (lines 3464-3534):

    if( nArg==2 ){
      zSrcFile = azArg[1];
      zDb = "main";
    }else if( nArg==3 ){
      zSrcFile = azArg[2];
      zDb = azArg[1];
    }else{
      fprintf(stderr, "Usage: .restore ?DB? FILE\n");
      rc = 1;
      goto meta_command_exit;
    }
    rc = sqlite3_open(zSrcFile, &pSrc);
    if( rc!=SQLITE_OK ){
      fprintf(stderr, "Error: cannot open \"%s\"\n", zSrcFile);
      sqlite3_close(pSrc);
      return 1;
    }
    open_db(p, 0);
    pBackup = sqlite3_backup_init(p->db, zDb, pSrc, "main");
    if( pBackup==0 ){
      fprintf(stderr, "Error: %s\n", sqlite3_errmsg(p->db));
      sqlite3_close(pSrc);
      return 1;
    }
    while( (rc = sqlite3_backup_step(pBackup,100))==SQLITE_OK
          || rc==SQLITE_BUSY  ){
      if( rc==SQLITE_BUSY ){
        if( nTimeout++ >= 3 ) break;
        sqlite3_sleep(100);
      }
    }
    sqlite3_backup_finish(pBackup);
    if( rc==SQLITE_DONE ){
      rc = 0;
    }else if( rc==SQLITE_BUSY || rc==SQLITE_LOCKED ){
      fprintf(stderr, "Error: source database is busy\n");
      rc = 1;
    }else{
      fprintf(stderr, "Error: %s\n", sqlite3_errmsg(p->db));
      rc = 1;
    }
    sqlite3_close(pSrc);
  }else


  if( c=='s' && strncmp(azArg[0], "scanstats", n)==0 ){
    if( nArg==2 ){
      p->scanstatsOn = booleanValue(azArg[1]);
#ifndef SQLITE_ENABLE_STMT_SCANSTATUS
      fprintf(stderr, "Warning: .scanstats not available in this build.\n");
#endif
    }else{
      fprintf(stderr, "Usage: .scanstats on|off\n");
      rc = 1;
    }
  }else

  if( c=='s' && strncmp(azArg[0], "schema", n)==0 ){
    ShellState data;
    char *zErrMsg = 0;
    open_db(p, 0);
    memcpy(&data, p, sizeof(data));
    data.showHeader = 0;
    data.mode = MODE_Semi;
    if( nArg==2 ){
      int i;
      for(i=0; azArg[1][i]; i++) azArg[1][i] = ToLower(azArg[1][i]);
      if( strcmp(azArg[1],"sqlite_master")==0 ){
        char *new_argv[2], *new_colv[2];
        new_argv[0] = "CREATE TABLE sqlite_master (\n"
                      "  type text,\n"






New code (lines 3601-3671):

    if( nArg==2 ){
      zSrcFile = azArg[1];
      zDb = "main";
    }else if( nArg==3 ){
      zSrcFile = azArg[2];
      zDb = azArg[1];
    }else{
      raw_printf(stderr, "Usage: .restore ?DB? FILE\n");
      rc = 1;
      goto meta_command_exit;
    }
    rc = sqlite3_open(zSrcFile, &pSrc);
    if( rc!=SQLITE_OK ){
      utf8_printf(stderr, "Error: cannot open \"%s\"\n", zSrcFile);
      sqlite3_close(pSrc);
      return 1;
    }
    open_db(p, 0);
    pBackup = sqlite3_backup_init(p->db, zDb, pSrc, "main");
    if( pBackup==0 ){
      utf8_printf(stderr, "Error: %s\n", sqlite3_errmsg(p->db));
      sqlite3_close(pSrc);
      return 1;
    }
    while( (rc = sqlite3_backup_step(pBackup,100))==SQLITE_OK
          || rc==SQLITE_BUSY  ){
      if( rc==SQLITE_BUSY ){
        if( nTimeout++ >= 3 ) break;
        sqlite3_sleep(100);
      }
    }
    sqlite3_backup_finish(pBackup);
    if( rc==SQLITE_DONE ){
      rc = 0;
    }else if( rc==SQLITE_BUSY || rc==SQLITE_LOCKED ){
      raw_printf(stderr, "Error: source database is busy\n");
      rc = 1;
    }else{
      utf8_printf(stderr, "Error: %s\n", sqlite3_errmsg(p->db));
      rc = 1;
    }
    sqlite3_close(pSrc);
  }else


  if( c=='s' && strncmp(azArg[0], "scanstats", n)==0 ){
    if( nArg==2 ){
      p->scanstatsOn = booleanValue(azArg[1]);
#ifndef SQLITE_ENABLE_STMT_SCANSTATUS
      raw_printf(stderr, "Warning: .scanstats not available in this build.\n");
#endif
    }else{
      raw_printf(stderr, "Usage: .scanstats on|off\n");
      rc = 1;
    }
  }else

  if( c=='s' && strncmp(azArg[0], "schema", n)==0 ){
    ShellState data;
    char *zErrMsg = 0;
    open_db(p, 0);
    memcpy(&data, p, sizeof(data));
    data.showHeader = 0;
    data.cMode = data.mode = MODE_Semi;
    if( nArg==2 ){
      int i;
      for(i=0; azArg[1][i]; i++) azArg[1][i] = ToLower(azArg[1][i]);
      if( strcmp(azArg[1],"sqlite_master")==0 ){
        char *new_argv[2], *new_colv[2];
        new_argv[0] = "CREATE TABLE sqlite_master (\n"
                      "  type text,\n"

Old code (lines 3576-3599):

         "     FROM sqlite_master UNION ALL"
         "   SELECT sql, type, tbl_name, name, rowid FROM sqlite_temp_master) "
         "WHERE type!='meta' AND sql NOTNULL AND name NOT LIKE 'sqlite_%' "
         "ORDER BY rowid",
         callback, &data, &zErrMsg
      );
    }else{
      fprintf(stderr, "Usage: .schema ?LIKE-PATTERN?\n");
      rc = 1;
      goto meta_command_exit;
    }
    if( zErrMsg ){
      fprintf(stderr,"Error: %s\n", zErrMsg);
      sqlite3_free(zErrMsg);
      rc = 1;
    }else if( rc != SQLITE_OK ){
      fprintf(stderr,"Error: querying schema information\n");
      rc = 1;
    }else{
      rc = 0;
    }
  }else








New code (lines 3713-3736):

         "     FROM sqlite_master UNION ALL"
         "   SELECT sql, type, tbl_name, name, rowid FROM sqlite_temp_master) "
         "WHERE type!='meta' AND sql NOTNULL AND name NOT LIKE 'sqlite_%' "
         "ORDER BY rowid",
         callback, &data, &zErrMsg
      );
    }else{
      raw_printf(stderr, "Usage: .schema ?LIKE-PATTERN?\n");
      rc = 1;
      goto meta_command_exit;
    }
    if( zErrMsg ){
      utf8_printf(stderr,"Error: %s\n", zErrMsg);
      sqlite3_free(zErrMsg);
      rc = 1;
    }else if( rc != SQLITE_OK ){
      raw_printf(stderr,"Error: querying schema information\n");
      rc = 1;
    }else{
      rc = 0;
    }
  }else



Old code (lines 3609-3792):

  /* Undocumented commands for internal testing.  Subject to change
  ** without notice. */
  if( c=='s' && n>=10 && strncmp(azArg[0], "selftest-", 9)==0 ){
    if( strncmp(azArg[0]+9, "boolean", n-9)==0 ){
      int i, v;
      for(i=1; i<nArg; i++){
        v = booleanValue(azArg[i]);
        fprintf(p->out, "%s: %d 0x%x\n", azArg[i], v, v);
      }
    }
    if( strncmp(azArg[0]+9, "integer", n-9)==0 ){
      int i; sqlite3_int64 v;
      for(i=1; i<nArg; i++){
        char zBuf[200];
        v = integerValue(azArg[i]);
        sqlite3_snprintf(sizeof(zBuf),zBuf,"%s: %lld 0x%llx\n", azArg[i],v,v);
        fprintf(p->out, "%s", zBuf);
      }
    }
  }else
#endif

  if( c=='s' && strncmp(azArg[0], "separator", n)==0 ){
    if( nArg<2 || nArg>3 ){
      fprintf(stderr, "Usage: .separator COL ?ROW?\n");
      rc = 1;
    }
    if( nArg>=2 ){
      sqlite3_snprintf(sizeof(p->colSeparator), p->colSeparator,
                       "%.*s", (int)ArraySize(p->colSeparator)-1, azArg[1]);
    }
    if( nArg>=3 ){
      sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator,
                       "%.*s", (int)ArraySize(p->rowSeparator)-1, azArg[2]);
    }
  }else

  if( c=='s'
   && (strncmp(azArg[0], "shell", n)==0 || strncmp(azArg[0],"system",n)==0)
  ){
    char *zCmd;
    int i, x;
    if( nArg<2 ){
      fprintf(stderr, "Usage: .system COMMAND\n");
      rc = 1;
      goto meta_command_exit;
    }
    zCmd = sqlite3_mprintf(strchr(azArg[1],' ')==0?"%s":"\"%s\"", azArg[1]);
    for(i=2; i<nArg; i++){
      zCmd = sqlite3_mprintf(strchr(azArg[i],' ')==0?"%z %s":"%z \"%s\"",
                             zCmd, azArg[i]);
    }
    x = system(zCmd);
    sqlite3_free(zCmd);
    if( x ) fprintf(stderr, "System command returns %d\n", x);
  }else

  if( c=='s' && strncmp(azArg[0], "show", n)==0 ){
    int i;
    if( nArg!=1 ){
      fprintf(stderr, "Usage: .show\n");
      rc = 1;
      goto meta_command_exit;
    }
    fprintf(p->out,"%12.12s: %s\n","echo", p->echoOn ? "on" : "off");
    fprintf(p->out,"%12.12s: %s\n","eqp", p->autoEQP ? "on" : "off");
    fprintf(p->out,"%9.9s: %s\n","explain", p->normalMode.valid ? "on" :"off");

    fprintf(p->out,"%12.12s: %s\n","headers", p->showHeader ? "on" : "off");
    fprintf(p->out,"%12.12s: %s\n","mode", modeDescr[p->mode]);
    fprintf(p->out,"%12.12s: ", "nullvalue");
      output_c_string(p->out, p->nullValue);
      fprintf(p->out, "\n");
    fprintf(p->out,"%12.12s: %s\n","output",
            strlen30(p->outfile) ? p->outfile : "stdout");
    fprintf(p->out,"%12.12s: ", "colseparator");
      output_c_string(p->out, p->colSeparator);
      fprintf(p->out, "\n");
    fprintf(p->out,"%12.12s: ", "rowseparator");
      output_c_string(p->out, p->rowSeparator);
      fprintf(p->out, "\n");
    fprintf(p->out,"%12.12s: %s\n","stats", p->statsOn ? "on" : "off");
    fprintf(p->out,"%12.12s: ","width");
    for (i=0;i<(int)ArraySize(p->colWidth) && p->colWidth[i] != 0;i++) {
      fprintf(p->out,"%d ",p->colWidth[i]);
    }
    fprintf(p->out,"\n");
  }else

  if( c=='s' && strncmp(azArg[0], "stats", n)==0 ){
    if( nArg==2 ){
      p->statsOn = booleanValue(azArg[1]);
    }else{
      fprintf(stderr, "Usage: .stats on|off\n");
      rc = 1;
    }
  }else

  if( c=='t' && n>1 && strncmp(azArg[0], "tables", n)==0 ){
    sqlite3_stmt *pStmt;
    char **azResult;
    int nRow, nAlloc;
    char *zSql = 0;
    int ii;
    open_db(p, 0);
    rc = sqlite3_prepare_v2(p->db, "PRAGMA database_list", -1, &pStmt, 0);
    if( rc ) return rc;

    zSql = sqlite3_mprintf(
        "SELECT name FROM sqlite_master"
        " WHERE type IN ('table','view')"
        "   AND name NOT LIKE 'sqlite_%%'"
        "   AND name LIKE ?1");
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      const char *zDbName = (const char*)sqlite3_column_text(pStmt, 1);
      if( zDbName==0 || strcmp(zDbName,"main")==0 ) continue;
      if( strcmp(zDbName,"temp")==0 ){
        zSql = sqlite3_mprintf(
                 "%z UNION ALL "
                 "SELECT 'temp.' || name FROM sqlite_temp_master"
                 " WHERE type IN ('table','view')"
                 "   AND name NOT LIKE 'sqlite_%%'"
                 "   AND name LIKE ?1", zSql);
      }else{
        zSql = sqlite3_mprintf(
                 "%z UNION ALL "
                 "SELECT '%q.' || name FROM \"%w\".sqlite_master"
                 " WHERE type IN ('table','view')"
                 "   AND name NOT LIKE 'sqlite_%%'"
                 "   AND name LIKE ?1", zSql, zDbName, zDbName);
      }
    }
    sqlite3_finalize(pStmt);

    zSql = sqlite3_mprintf("%z ORDER BY 1", zSql);
    rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);

    sqlite3_free(zSql);

    if( rc ) return rc;



    nRow = nAlloc = 0;
    azResult = 0;
    if( nArg>1 ){
      sqlite3_bind_text(pStmt, 1, azArg[1], -1, SQLITE_TRANSIENT);
    }else{
      sqlite3_bind_text(pStmt, 1, "%", -1, SQLITE_STATIC);
    }
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      if( nRow>=nAlloc ){
        char **azNew;
        int n2 = nAlloc*2 + 10;
        azNew = sqlite3_realloc64(azResult, sizeof(azResult[0])*n2);
        if( azNew==0 ){
          fprintf(stderr, "Error: out of memory\n");
          break;
        }
        nAlloc = n2;
        azResult = azNew;
      }
      azResult[nRow] = sqlite3_mprintf("%s", sqlite3_column_text(pStmt, 0));
      if( azResult[nRow] ) nRow++;
    }
    sqlite3_finalize(pStmt);

    if( nRow>0 ){
      int len, maxlen = 0;
      int i, j;
      int nPrintCol, nPrintRow;
      for(i=0; i<nRow; i++){
        len = strlen30(azResult[i]);
        if( len>maxlen ) maxlen = len;
      }
      nPrintCol = 80/(maxlen+2);
      if( nPrintCol<1 ) nPrintCol = 1;
      nPrintRow = (nRow + nPrintCol - 1)/nPrintCol;
      for(i=0; i<nPrintRow; i++){
        for(j=i; j<nRow; j+=nPrintRow){
          char *zSp = j<nPrintRow ? "" : "  ";
          fprintf(p->out, "%s%-*s", zSp, maxlen, azResult[j] ? azResult[j]:"");

        }
        fprintf(p->out, "\n");
      }
    }

    for(ii=0; ii<nRow; ii++) sqlite3_free(azResult[ii]);
    sqlite3_free(azResult);
  }else

  if( c=='t' && n>=8 && strncmp(azArg[0], "testctrl", n)==0 && nArg>=2 ){
    static const struct {
       const char *zCtrlName;   /* Name of a test-control option */

  /* Undocumented commands for internal testing.  Subject to change
  ** without notice. */
  if( c=='s' && n>=10 && strncmp(azArg[0], "selftest-", 9)==0 ){
    if( strncmp(azArg[0]+9, "boolean", n-9)==0 ){
      int i, v;
      for(i=1; i<nArg; i++){
        v = booleanValue(azArg[i]);
        utf8_printf(p->out, "%s: %d 0x%x\n", azArg[i], v, v);
      }
    }
    if( strncmp(azArg[0]+9, "integer", n-9)==0 ){
      int i; sqlite3_int64 v;
      for(i=1; i<nArg; i++){
        char zBuf[200];
        v = integerValue(azArg[i]);
        sqlite3_snprintf(sizeof(zBuf),zBuf,"%s: %lld 0x%llx\n", azArg[i],v,v);
        utf8_printf(p->out, "%s", zBuf);
      }
    }
  }else
#endif

  if( c=='s' && strncmp(azArg[0], "separator", n)==0 ){
    if( nArg<2 || nArg>3 ){
      raw_printf(stderr, "Usage: .separator COL ?ROW?\n");
      rc = 1;
    }
    if( nArg>=2 ){
      sqlite3_snprintf(sizeof(p->colSeparator), p->colSeparator,
                       "%.*s", (int)ArraySize(p->colSeparator)-1, azArg[1]);
    }
    if( nArg>=3 ){
      sqlite3_snprintf(sizeof(p->rowSeparator), p->rowSeparator,
                       "%.*s", (int)ArraySize(p->rowSeparator)-1, azArg[2]);
    }
  }else

  if( c=='s'
   && (strncmp(azArg[0], "shell", n)==0 || strncmp(azArg[0],"system",n)==0)
  ){
    char *zCmd;
    int i, x;
    if( nArg<2 ){
      raw_printf(stderr, "Usage: .system COMMAND\n");
      rc = 1;
      goto meta_command_exit;
    }
    zCmd = sqlite3_mprintf(strchr(azArg[1],' ')==0?"%s":"\"%s\"", azArg[1]);
    for(i=2; i<nArg; i++){
      zCmd = sqlite3_mprintf(strchr(azArg[i],' ')==0?"%z %s":"%z \"%s\"",
                             zCmd, azArg[i]);
    }
    x = system(zCmd);
    sqlite3_free(zCmd);
    if( x ) raw_printf(stderr, "System command returns %d\n", x);
  }else

  if( c=='s' && strncmp(azArg[0], "show", n)==0 ){
    int i;
    if( nArg!=1 ){
      raw_printf(stderr, "Usage: .show\n");
      rc = 1;
      goto meta_command_exit;
    }
    utf8_printf(p->out, "%12.12s: %s\n","echo", p->echoOn ? "on" : "off");
    utf8_printf(p->out, "%12.12s: %s\n","eqp", p->autoEQP ? "on" : "off");
    utf8_printf(p->out, "%12.12s: %s\n","explain",
         p->mode==MODE_Explain ? "on" : p->autoExplain ? "auto" : "off");
    utf8_printf(p->out,"%12.12s: %s\n","headers", p->showHeader ? "on" : "off");
    utf8_printf(p->out, "%12.12s: %s\n","mode", modeDescr[p->mode]);
    utf8_printf(p->out, "%12.12s: ", "nullvalue");
      output_c_string(p->out, p->nullValue);
      raw_printf(p->out, "\n");
    utf8_printf(p->out,"%12.12s: %s\n","output",
            strlen30(p->outfile) ? p->outfile : "stdout");
    utf8_printf(p->out,"%12.12s: ", "colseparator");
      output_c_string(p->out, p->colSeparator);
      raw_printf(p->out, "\n");
    utf8_printf(p->out,"%12.12s: ", "rowseparator");
      output_c_string(p->out, p->rowSeparator);
      raw_printf(p->out, "\n");
    utf8_printf(p->out, "%12.12s: %s\n","stats", p->statsOn ? "on" : "off");
    utf8_printf(p->out, "%12.12s: ", "width");
    for (i=0;i<(int)ArraySize(p->colWidth) && p->colWidth[i] != 0;i++) {
      raw_printf(p->out, "%d ", p->colWidth[i]);
    }
    raw_printf(p->out, "\n");
  }else

  if( c=='s' && strncmp(azArg[0], "stats", n)==0 ){
    if( nArg==2 ){
      p->statsOn = booleanValue(azArg[1]);
    }else{
      raw_printf(stderr, "Usage: .stats on|off\n");
      rc = 1;
    }
  }else

  if( c=='t' && n>1 && strncmp(azArg[0], "tables", n)==0 ){
    sqlite3_stmt *pStmt;
    char **azResult;
    int nRow, nAlloc;
    char *zSql = 0;
    int ii;
    open_db(p, 0);
    rc = sqlite3_prepare_v2(p->db, "PRAGMA database_list", -1, &pStmt, 0);
    if( rc ) return shellDatabaseError(p->db);

    /* Create an SQL statement to query for the list of tables in the
    ** main and all attached databases where the table name matches the
    ** LIKE pattern bound to variable "?1". */
    zSql = sqlite3_mprintf(
        "SELECT name FROM sqlite_master"
        " WHERE type IN ('table','view')"
        "   AND name NOT LIKE 'sqlite_%%'"
        "   AND name LIKE ?1");
    while( zSql && sqlite3_step(pStmt)==SQLITE_ROW ){
      const char *zDbName = (const char*)sqlite3_column_text(pStmt, 1);
      if( zDbName==0 || strcmp(zDbName,"main")==0 ) continue;
      if( strcmp(zDbName,"temp")==0 ){
        zSql = sqlite3_mprintf(
                 "%z UNION ALL "
                 "SELECT 'temp.' || name FROM sqlite_temp_master"
                 " WHERE type IN ('table','view')"
                 "   AND name NOT LIKE 'sqlite_%%'"
                 "   AND name LIKE ?1", zSql);
      }else{
        zSql = sqlite3_mprintf(
                 "%z UNION ALL "
                 "SELECT '%q.' || name FROM \"%w\".sqlite_master"
                 " WHERE type IN ('table','view')"
                 "   AND name NOT LIKE 'sqlite_%%'"
                 "   AND name LIKE ?1", zSql, zDbName, zDbName);
      }
    }
    rc = sqlite3_finalize(pStmt);
    if( zSql && rc==SQLITE_OK ){
      zSql = sqlite3_mprintf("%z ORDER BY 1", zSql);
      if( zSql ) rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
    }
    sqlite3_free(zSql);
    if( !zSql ) return shellNomemError();
    if( rc ) return shellDatabaseError(p->db);

    /* Run the SQL statement prepared by the above block. Store the results
    ** as an array of nul-terminated strings in azResult[].  */
    nRow = nAlloc = 0;
    azResult = 0;
    if( nArg>1 ){
      sqlite3_bind_text(pStmt, 1, azArg[1], -1, SQLITE_TRANSIENT);
    }else{
      sqlite3_bind_text(pStmt, 1, "%", -1, SQLITE_STATIC);
    }
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      if( nRow>=nAlloc ){
        char **azNew;
        int n2 = nAlloc*2 + 10;
        azNew = sqlite3_realloc64(azResult, sizeof(azResult[0])*n2);
        if( azNew==0 ){
          rc = shellNomemError();
          break;
        }
        nAlloc = n2;
        azResult = azNew;
      }
      azResult[nRow] = sqlite3_mprintf("%s", sqlite3_column_text(pStmt, 0));
      if( 0==azResult[nRow] ){
        rc = shellNomemError();
        break;
      }
      nRow++;
    }
    if( sqlite3_finalize(pStmt)!=SQLITE_OK ){
      rc = shellDatabaseError(p->db);
    }

    /* Pretty-print the contents of array azResult[] to the output */
    if( rc==0 && nRow>0 ){
      int len, maxlen = 0;
      int i, j;
      int nPrintCol, nPrintRow;
      for(i=0; i<nRow; i++){
        len = strlen30(azResult[i]);
        if( len>maxlen ) maxlen = len;
      }
      nPrintCol = 80/(maxlen+2);
      if( nPrintCol<1 ) nPrintCol = 1;
      nPrintRow = (nRow + nPrintCol - 1)/nPrintCol;
      for(i=0; i<nPrintRow; i++){
        for(j=i; j<nRow; j+=nPrintRow){
          char *zSp = j<nPrintRow ? "" : "  ";
          utf8_printf(p->out, "%s%-*s", zSp, maxlen,
                      azResult[j] ? azResult[j]:"");
        }
        raw_printf(p->out, "\n");
      }
    }

    for(ii=0; ii<nRow; ii++) sqlite3_free(azResult[ii]);
    sqlite3_free(azResult);
  }else
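
The ".tables" handler above assembles its query incrementally with sqlite3_mprintf(): "%z" consumes and frees the previous string, "%q" applies single-quote escaping, and "%w" quotes an identifier. A small self-contained sketch of the same idiom; the attached-database name "aux" is purely illustrative:

#include <stdio.h>
#include <sqlite3.h>

/* Append a UNION ALL arm for one attached database, freeing the old SQL. */
static char *appendDbArm(char *zSql, const char *zDb){
  return sqlite3_mprintf(
      "%z UNION ALL SELECT '%q.' || name FROM \"%w\".sqlite_master"
      " WHERE type IN ('table','view')",
      zSql, zDb, zDb);
}

int main(void){
  char *zSql = sqlite3_mprintf(
      "SELECT name FROM sqlite_master WHERE type IN ('table','view')");
  if( zSql ) zSql = appendDbArm(zSql, "aux");
  if( zSql ) zSql = sqlite3_mprintf("%z ORDER BY 1", zSql);
  if( zSql ) printf("%s\n", zSql);      /* a NULL result indicates an OOM */
  sqlite3_free(zSql);
  return 0;
}
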

  if( c=='t' && n>=8 && strncmp(azArg[0], "testctrl", n)==0 && nArg>=2 ){
    static const struct {
       const char *zCtrlName;   /* Name of a test-control option */
    ** of the option name, or a numerical value. */
    n2 = strlen30(azArg[1]);
    for(i=0; i<ArraySize(aCtrl); i++){
      if( strncmp(azArg[1], aCtrl[i].zCtrlName, n2)==0 ){
        if( testctrl<0 ){
          testctrl = aCtrl[i].ctrlCode;
        }else{
          fprintf(stderr, "ambiguous option name: \"%s\"\n", azArg[1]);
          testctrl = -1;
          break;
        }
      }
    }
    if( testctrl<0 ) testctrl = (int)integerValue(azArg[1]);
    if( (testctrl<SQLITE_TESTCTRL_FIRST) || (testctrl>SQLITE_TESTCTRL_LAST) ){
      fprintf(stderr,"Error: invalid testctrl option: %s\n", azArg[1]);
    }else{
      switch(testctrl){

        /* sqlite3_test_control(int, db, int) */
        case SQLITE_TESTCTRL_OPTIMIZATIONS:
        case SQLITE_TESTCTRL_RESERVE:             
          if( nArg==3 ){
            int opt = (int)strtol(azArg[2], 0, 0);        
            rc2 = sqlite3_test_control(testctrl, p->db, opt);
            fprintf(p->out, "%d (0x%08x)\n", rc2, rc2);
          } else {
            fprintf(stderr,"Error: testctrl %s takes a single int option\n",
                    azArg[1]);
          }
          break;

        /* sqlite3_test_control(int) */
        case SQLITE_TESTCTRL_PRNG_SAVE:
        case SQLITE_TESTCTRL_PRNG_RESTORE:
        case SQLITE_TESTCTRL_PRNG_RESET:
        case SQLITE_TESTCTRL_BYTEORDER:
          if( nArg==2 ){
            rc2 = sqlite3_test_control(testctrl);
            fprintf(p->out, "%d (0x%08x)\n", rc2, rc2);
          } else {
            fprintf(stderr,"Error: testctrl %s takes no options\n", azArg[1]);

          }
          break;

        /* sqlite3_test_control(int, uint) */
        case SQLITE_TESTCTRL_PENDING_BYTE:        
          if( nArg==3 ){
            unsigned int opt = (unsigned int)integerValue(azArg[2]);
            rc2 = sqlite3_test_control(testctrl, opt);
            fprintf(p->out, "%d (0x%08x)\n", rc2, rc2);
          } else {
            fprintf(stderr,"Error: testctrl %s takes a single unsigned"
                           " int option\n", azArg[1]);
          }
          break;
          
        /* sqlite3_test_control(int, int) */
        case SQLITE_TESTCTRL_ASSERT:              
        case SQLITE_TESTCTRL_ALWAYS:      
        case SQLITE_TESTCTRL_NEVER_CORRUPT:        
          if( nArg==3 ){
            int opt = booleanValue(azArg[2]);        
            rc2 = sqlite3_test_control(testctrl, opt);
            fprintf(p->out, "%d (0x%08x)\n", rc2, rc2);
          } else {
            fprintf(stderr,"Error: testctrl %s takes a single int option\n",
                            azArg[1]);
          }
          break;

        /* sqlite3_test_control(int, char *) */
#ifdef SQLITE_N_KEYWORD
        case SQLITE_TESTCTRL_ISKEYWORD:           
          if( nArg==3 ){
            const char *opt = azArg[2];        
            rc2 = sqlite3_test_control(testctrl, opt);
            fprintf(p->out, "%d (0x%08x)\n", rc2, rc2);
          } else {

            fprintf(stderr,"Error: testctrl %s takes a single char * option\n",
                            azArg[1]);
          }
          break;
#endif

        case SQLITE_TESTCTRL_IMPOSTER:
          if( nArg==5 ){
            rc2 = sqlite3_test_control(testctrl, p->db, 
                          azArg[2],
                          integerValue(azArg[3]),
                          integerValue(azArg[4]));
            fprintf(p->out, "%d (0x%08x)\n", rc2, rc2);
          }else{
            fprintf(stderr,"Usage: .testctrl imposter dbName onoff tnum\n");
          }
          break;

        case SQLITE_TESTCTRL_BITVEC_TEST:         
        case SQLITE_TESTCTRL_FAULT_INSTALL:       
        case SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS: 
        case SQLITE_TESTCTRL_SCRATCHMALLOC:       
        default:

          fprintf(stderr,"Error: CLI support for testctrl %s not implemented\n",
                  azArg[1]);
          break;
      }
    }
  }else

  if( c=='t' && n>4 && strncmp(azArg[0], "timeout", n)==0 ){
    open_db(p, 0);
    sqlite3_busy_timeout(p->db, nArg>=2 ? (int)integerValue(azArg[1]) : 0);
  }else
    
  if( c=='t' && n>=5 && strncmp(azArg[0], "timer", n)==0 ){
    if( nArg==2 ){
      enableTimer = booleanValue(azArg[1]);
      if( enableTimer && !HAS_TIMER ){
        fprintf(stderr, "Error: timer not available on this system.\n");
        enableTimer = 0;
      }
    }else{
      fprintf(stderr, "Usage: .timer on|off\n");
      rc = 1;
    }
  }else
  
  if( c=='t' && strncmp(azArg[0], "trace", n)==0 ){
    open_db(p, 0);
    if( nArg!=2 ){
      fprintf(stderr, "Usage: .trace FILE|off\n");
      rc = 1;
      goto meta_command_exit;
    }
    output_file_close(p->traceOut);
    p->traceOut = output_file_open(azArg[1]);
#if !defined(SQLITE_OMIT_TRACE) && !defined(SQLITE_OMIT_FLOATING_POINT)
    if( p->traceOut==0 ){
      sqlite3_trace(p->db, 0, 0);
    }else{
      sqlite3_trace(p->db, sql_trace_callback, p->traceOut);
    }
#endif
  }else

#if SQLITE_USER_AUTHENTICATION
  if( c=='u' && strncmp(azArg[0], "user", n)==0 ){
    if( nArg<2 ){
      fprintf(stderr, "Usage: .user SUBCOMMAND ...\n");
      rc = 1;
      goto meta_command_exit;
    }
    open_db(p, 0);
    if( strcmp(azArg[1],"login")==0 ){
      if( nArg!=4 ){
        fprintf(stderr, "Usage: .user login USER PASSWORD\n");
        rc = 1;
        goto meta_command_exit;
      }
      rc = sqlite3_user_authenticate(p->db, azArg[2], azArg[3],
                                    (int)strlen(azArg[3]));
      if( rc ){
        fprintf(stderr, "Authentication failed for user %s\n", azArg[2]);
        rc = 1;
      }
    }else if( strcmp(azArg[1],"add")==0 ){
      if( nArg!=5 ){
        fprintf(stderr, "Usage: .user add USER PASSWORD ISADMIN\n");
        rc = 1;
        goto meta_command_exit;
      }
      rc = sqlite3_user_add(p->db, azArg[2],
                            azArg[3], (int)strlen(azArg[3]),
                            booleanValue(azArg[4]));
      if( rc ){
        fprintf(stderr, "User-Add failed: %d\n", rc);
        rc = 1;
      }
    }else if( strcmp(azArg[1],"edit")==0 ){
      if( nArg!=5 ){
        fprintf(stderr, "Usage: .user edit USER PASSWORD ISADMIN\n");
        rc = 1;
        goto meta_command_exit;
      }
      rc = sqlite3_user_change(p->db, azArg[2],
                              azArg[3], (int)strlen(azArg[3]),
                              booleanValue(azArg[4]));
      if( rc ){
        fprintf(stderr, "User-Edit failed: %d\n", rc);
        rc = 1;
      }
    }else if( strcmp(azArg[1],"delete")==0 ){
      if( nArg!=3 ){
        fprintf(stderr, "Usage: .user delete USER\n");
        rc = 1;
        goto meta_command_exit;
      }
      rc = sqlite3_user_delete(p->db, azArg[2]);
      if( rc ){
        fprintf(stderr, "User-Delete failed: %d\n", rc);
        rc = 1;
      }
    }else{
      fprintf(stderr, "Usage: .user login|add|edit|delete ...\n");
      rc = 1;
      goto meta_command_exit;
    }    
  }else
#endif /* SQLITE_USER_AUTHENTICATION */

  if( c=='v' && strncmp(azArg[0], "version", n)==0 ){
    fprintf(p->out, "SQLite %s %s\n" /*extra-version-info*/,
        sqlite3_libversion(), sqlite3_sourceid());
  }else

  if( c=='v' && strncmp(azArg[0], "vfsname", n)==0 ){
    const char *zDbName = nArg==2 ? azArg[1] : "main";
    char *zVfsName = 0;
    if( p->db ){
      sqlite3_file_control(p->db, zDbName, SQLITE_FCNTL_VFSNAME, &zVfsName);
      if( zVfsName ){
        fprintf(p->out, "%s\n", zVfsName);
        sqlite3_free(zVfsName);
      }
    }
  }else

#if defined(SQLITE_DEBUG) && defined(SQLITE_ENABLE_WHERETRACE)
  if( c=='w' && strncmp(azArg[0], "wheretrace", n)==0 ){

    ** of the option name, or a numerical value. */
    n2 = strlen30(azArg[1]);
    for(i=0; i<ArraySize(aCtrl); i++){
      if( strncmp(azArg[1], aCtrl[i].zCtrlName, n2)==0 ){
        if( testctrl<0 ){
          testctrl = aCtrl[i].ctrlCode;
        }else{
          utf8_printf(stderr, "ambiguous option name: \"%s\"\n", azArg[1]);
          testctrl = -1;
          break;
        }
      }
    }
    if( testctrl<0 ) testctrl = (int)integerValue(azArg[1]);
    if( (testctrl<SQLITE_TESTCTRL_FIRST) || (testctrl>SQLITE_TESTCTRL_LAST) ){
      utf8_printf(stderr,"Error: invalid testctrl option: %s\n", azArg[1]);
    }else{
      switch(testctrl){

        /* sqlite3_test_control(int, db, int) */
        case SQLITE_TESTCTRL_OPTIMIZATIONS:
        case SQLITE_TESTCTRL_RESERVE:             
          if( nArg==3 ){
            int opt = (int)strtol(azArg[2], 0, 0);        
            rc2 = sqlite3_test_control(testctrl, p->db, opt);
            raw_printf(p->out, "%d (0x%08x)\n", rc2, rc2);
          } else {
            utf8_printf(stderr,"Error: testctrl %s takes a single int option\n",
                    azArg[1]);
          }
          break;

        /* sqlite3_test_control(int) */
        case SQLITE_TESTCTRL_PRNG_SAVE:
        case SQLITE_TESTCTRL_PRNG_RESTORE:
        case SQLITE_TESTCTRL_PRNG_RESET:
        case SQLITE_TESTCTRL_BYTEORDER:
          if( nArg==2 ){
            rc2 = sqlite3_test_control(testctrl);
            raw_printf(p->out, "%d (0x%08x)\n", rc2, rc2);
          } else {
            utf8_printf(stderr,"Error: testctrl %s takes no options\n",
                        azArg[1]);
          }
          break;

        /* sqlite3_test_control(int, uint) */
        case SQLITE_TESTCTRL_PENDING_BYTE:        
          if( nArg==3 ){
            unsigned int opt = (unsigned int)integerValue(azArg[2]);
            rc2 = sqlite3_test_control(testctrl, opt);
            raw_printf(p->out, "%d (0x%08x)\n", rc2, rc2);
          } else {
            utf8_printf(stderr,"Error: testctrl %s takes a single unsigned"
                           " int option\n", azArg[1]);
          }
          break;
          
        /* sqlite3_test_control(int, int) */
        case SQLITE_TESTCTRL_ASSERT:              
        case SQLITE_TESTCTRL_ALWAYS:      
        case SQLITE_TESTCTRL_NEVER_CORRUPT:        
          if( nArg==3 ){
            int opt = booleanValue(azArg[2]);        
            rc2 = sqlite3_test_control(testctrl, opt);
            raw_printf(p->out, "%d (0x%08x)\n", rc2, rc2);
          } else {
            utf8_printf(stderr,"Error: testctrl %s takes a single int option\n",
                            azArg[1]);
          }
          break;

        /* sqlite3_test_control(int, char *) */
#ifdef SQLITE_N_KEYWORD
        case SQLITE_TESTCTRL_ISKEYWORD:           
          if( nArg==3 ){
            const char *opt = azArg[2];        
            rc2 = sqlite3_test_control(testctrl, opt);
            raw_printf(p->out, "%d (0x%08x)\n", rc2, rc2);
          } else {
            utf8_printf(stderr,
                        "Error: testctrl %s takes a single char * option\n",
                        azArg[1]);
          }
          break;
#endif

        case SQLITE_TESTCTRL_IMPOSTER:
          if( nArg==5 ){
            rc2 = sqlite3_test_control(testctrl, p->db, 
                          azArg[2],
                          integerValue(azArg[3]),
                          integerValue(azArg[4]));
            raw_printf(p->out, "%d (0x%08x)\n", rc2, rc2);
          }else{
            raw_printf(stderr,"Usage: .testctrl imposter dbName onoff tnum\n");
          }
          break;

        case SQLITE_TESTCTRL_BITVEC_TEST:         
        case SQLITE_TESTCTRL_FAULT_INSTALL:       
        case SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS: 
        case SQLITE_TESTCTRL_SCRATCHMALLOC:       
        default:
          utf8_printf(stderr,
                      "Error: CLI support for testctrl %s not implemented\n",
                      azArg[1]);
          break;
      }
    }
  }else
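
For reference, a minimal sketch of the no-argument form of sqlite3_test_control() dispatched above; the meaning of the value returned by SQLITE_TESTCTRL_BYTEORDER is an SQLite internal detail, so it is simply printed in the same "%d (0x%08x)" form the shell uses:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  int rc = sqlite3_test_control(SQLITE_TESTCTRL_BYTEORDER);
  printf("%d (0x%08x)\n", rc, rc);
  return 0;
}
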

  if( c=='t' && n>4 && strncmp(azArg[0], "timeout", n)==0 ){
    open_db(p, 0);
    sqlite3_busy_timeout(p->db, nArg>=2 ? (int)integerValue(azArg[1]) : 0);
  }else
    
  if( c=='t' && n>=5 && strncmp(azArg[0], "timer", n)==0 ){
    if( nArg==2 ){
      enableTimer = booleanValue(azArg[1]);
      if( enableTimer && !HAS_TIMER ){
        raw_printf(stderr, "Error: timer not available on this system.\n");
        enableTimer = 0;
      }
    }else{
      raw_printf(stderr, "Usage: .timer on|off\n");
      rc = 1;
    }
  }else
  
  if( c=='t' && strncmp(azArg[0], "trace", n)==0 ){
    open_db(p, 0);
    if( nArg!=2 ){
      raw_printf(stderr, "Usage: .trace FILE|off\n");
      rc = 1;
      goto meta_command_exit;
    }
    output_file_close(p->traceOut);
    p->traceOut = output_file_open(azArg[1]);
#if !defined(SQLITE_OMIT_TRACE) && !defined(SQLITE_OMIT_FLOATING_POINT)
    if( p->traceOut==0 ){
      sqlite3_trace(p->db, 0, 0);
    }else{
      sqlite3_trace(p->db, sql_trace_callback, p->traceOut);
    }
#endif
  }else

#if SQLITE_USER_AUTHENTICATION
  if( c=='u' && strncmp(azArg[0], "user", n)==0 ){
    if( nArg<2 ){
      raw_printf(stderr, "Usage: .user SUBCOMMAND ...\n");
      rc = 1;
      goto meta_command_exit;
    }
    open_db(p, 0);
    if( strcmp(azArg[1],"login")==0 ){
      if( nArg!=4 ){
        raw_printf(stderr, "Usage: .user login USER PASSWORD\n");
        rc = 1;
        goto meta_command_exit;
      }
      rc = sqlite3_user_authenticate(p->db, azArg[2], azArg[3],
                                    (int)strlen(azArg[3]));
      if( rc ){
        utf8_printf(stderr, "Authentication failed for user %s\n", azArg[2]);
        rc = 1;
      }
    }else if( strcmp(azArg[1],"add")==0 ){
      if( nArg!=5 ){
        raw_printf(stderr, "Usage: .user add USER PASSWORD ISADMIN\n");
        rc = 1;
        goto meta_command_exit;
      }
      rc = sqlite3_user_add(p->db, azArg[2],
                            azArg[3], (int)strlen(azArg[3]),
                            booleanValue(azArg[4]));
      if( rc ){
        raw_printf(stderr, "User-Add failed: %d\n", rc);
        rc = 1;
      }
    }else if( strcmp(azArg[1],"edit")==0 ){
      if( nArg!=5 ){
        raw_printf(stderr, "Usage: .user edit USER PASSWORD ISADMIN\n");
        rc = 1;
        goto meta_command_exit;
      }
      rc = sqlite3_user_change(p->db, azArg[2],
                              azArg[3], (int)strlen(azArg[3]),
                              booleanValue(azArg[4]));
      if( rc ){
        raw_printf(stderr, "User-Edit failed: %d\n", rc);
        rc = 1;
      }
    }else if( strcmp(azArg[1],"delete")==0 ){
      if( nArg!=3 ){
        raw_printf(stderr, "Usage: .user delete USER\n");
        rc = 1;
        goto meta_command_exit;
      }
      rc = sqlite3_user_delete(p->db, azArg[2]);
      if( rc ){
        raw_printf(stderr, "User-Delete failed: %d\n", rc);
        rc = 1;
      }
    }else{
      raw_printf(stderr, "Usage: .user login|add|edit|delete ...\n");
      rc = 1;
      goto meta_command_exit;
    }    
  }else
#endif /* SQLITE_USER_AUTHENTICATION */
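
The ".user" subcommands above wrap the sqlite3_user_* interfaces of the user-authentication extension. A minimal sketch, assuming SQLite was built with -DSQLITE_USER_AUTHENTICATION and that the extension's sqlite3userauth.h header is available; the user name, password, and file name are made up for the example:

#include <stdio.h>
#include <string.h>
#include <sqlite3.h>
#include "sqlite3userauth.h"    /* declares the sqlite3_user_* API */

int main(void){
  sqlite3 *db;
  int rc;
  if( sqlite3_open("auth-demo.db", &db)!=SQLITE_OK ) return 1;
  /* Adding the first user converts a no-authentication database into an
  ** authentication-required database; that first user must be an admin. */
  rc = sqlite3_user_add(db, "alice", "secret", (int)strlen("secret"), 1);
  if( rc==SQLITE_OK ){
    rc = sqlite3_user_authenticate(db, "alice", "secret", (int)strlen("secret"));
  }
  if( rc!=SQLITE_OK ) fprintf(stderr, "user-auth error: %d\n", rc);
  sqlite3_close(db);
  return 0;
}
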

  if( c=='v' && strncmp(azArg[0], "version", n)==0 ){
    utf8_printf(p->out, "SQLite %s %s\n" /*extra-version-info*/,
        sqlite3_libversion(), sqlite3_sourceid());
  }else

  if( c=='v' && strncmp(azArg[0], "vfsinfo", n)==0 ){
    const char *zDbName = nArg==2 ? azArg[1] : "main";
    sqlite3_vfs *pVfs;
    if( p->db ){
      sqlite3_file_control(p->db, zDbName, SQLITE_FCNTL_VFS_POINTER, &pVfs);
      if( pVfs ){
        utf8_printf(p->out, "vfs.zName      = \"%s\"\n", pVfs->zName);
        raw_printf(p->out, "vfs.iVersion   = %d\n", pVfs->iVersion);
        raw_printf(p->out, "vfs.szOsFile   = %d\n", pVfs->szOsFile);
        raw_printf(p->out, "vfs.mxPathname = %d\n", pVfs->mxPathname);
      }
    }
  }else

  if( c=='v' && strncmp(azArg[0], "vfslist", n)==0 ){
    sqlite3_vfs *pVfs;
    sqlite3_vfs *pCurrent = 0;
    if( p->db ){
      sqlite3_file_control(p->db, "main", SQLITE_FCNTL_VFS_POINTER, &pCurrent);
    }
    for(pVfs=sqlite3_vfs_find(0); pVfs; pVfs=pVfs->pNext){
      utf8_printf(p->out, "vfs.zName      = \"%s\"%s\n", pVfs->zName,
           pVfs==pCurrent ? "  <--- CURRENT" : "");
      raw_printf(p->out, "vfs.iVersion   = %d\n", pVfs->iVersion);
      raw_printf(p->out, "vfs.szOsFile   = %d\n", pVfs->szOsFile);
      raw_printf(p->out, "vfs.mxPathname = %d\n", pVfs->mxPathname);
      if( pVfs->pNext ){
        raw_printf(p->out, "-----------------------------------\n");
      }
    }
  }else

  if( c=='v' && strncmp(azArg[0], "vfsname", n)==0 ){
    const char *zDbName = nArg==2 ? azArg[1] : "main";
    char *zVfsName = 0;
    if( p->db ){
      sqlite3_file_control(p->db, zDbName, SQLITE_FCNTL_VFSNAME, &zVfsName);
      if( zVfsName ){
        utf8_printf(p->out, "%s\n", zVfsName);
        sqlite3_free(zVfsName);
      }
    }
  }else
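
The ".vfsinfo", ".vfslist", and ".vfsname" handlers above all go through sqlite3_file_control(). A minimal sketch of the SQLITE_FCNTL_VFSNAME query from application code; the database file name "vfs-demo.db" is only an example:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *zVfsName = 0;
  if( sqlite3_open("vfs-demo.db", &db)!=SQLITE_OK ) return 1;
  /* If supported, the VFS returns its name (e.g. "unix" or "win32") in a
  ** string obtained from sqlite3_malloc(); the caller must free it, just
  ** as the shell code above does. */
  sqlite3_file_control(db, "main", SQLITE_FCNTL_VFSNAME, &zVfsName);
  if( zVfsName ){
    printf("%s\n", zVfsName);
    sqlite3_free(zVfsName);
  }
  sqlite3_close(db);
  return 0;
}
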

#if defined(SQLITE_DEBUG) && defined(SQLITE_ENABLE_WHERETRACE)
  if( c=='w' && strncmp(azArg[0], "wheretrace", n)==0 ){
    assert( nArg<=ArraySize(azArg) );
    for(j=1; j<nArg && j<ArraySize(p->colWidth); j++){
      p->colWidth[j-1] = (int)integerValue(azArg[j]);
    }
  }else

  {
    fprintf(stderr, "Error: unknown command or invalid arguments: "
      " \"%s\". Enter \".help\" for help\n", azArg[0]);
    rc = 1;
  }

meta_command_exit:
  if( p->outCount ){
    p->outCount--;

    assert( nArg<=ArraySize(azArg) );
    for(j=1; j<nArg && j<ArraySize(p->colWidth); j++){
      p->colWidth[j-1] = (int)integerValue(azArg[j]);
    }
  }else

  {
    utf8_printf(stderr, "Error: unknown command or invalid arguments: "
      " \"%s\". Enter \".help\" for help\n", azArg[0]);
    rc = 1;
  }

meta_command_exit:
  if( p->outCount ){
    p->outCount--;
      memcpy(zLine,";",2);
    }
    nLine = strlen30(zLine);
    if( nSql+nLine+2>=nAlloc ){
      nAlloc = nSql+nLine+100;
      zSql = realloc(zSql, nAlloc);
      if( zSql==0 ){
        fprintf(stderr, "Error: out of memory\n");
        exit(1);
      }
    }
    nSqlPrior = nSql;
    if( nSql==0 ){
      int i;
      for(i=0; zLine[i] && IsSpace(zLine[i]); i++){}

      memcpy(zLine,";",2);
    }
    nLine = strlen30(zLine);
    if( nSql+nLine+2>=nAlloc ){
      nAlloc = nSql+nLine+100;
      zSql = realloc(zSql, nAlloc);
      if( zSql==0 ){
        raw_printf(stderr, "Error: out of memory\n");
        exit(1);
      }
    }
    nSqlPrior = nSql;
    if( nSql==0 ){
      int i;
      for(i=0; zLine[i] && IsSpace(zLine[i]); i++){}
        if( in!=0 || !stdin_is_interactive ){
          sqlite3_snprintf(sizeof(zPrefix), zPrefix, 
                           "Error: near line %d:", startline);
        }else{
          sqlite3_snprintf(sizeof(zPrefix), zPrefix, "Error:");
        }
        if( zErrMsg!=0 ){
          fprintf(stderr, "%s %s\n", zPrefix, zErrMsg);
          sqlite3_free(zErrMsg);
          zErrMsg = 0;
        }else{
          fprintf(stderr, "%s %s\n", zPrefix, sqlite3_errmsg(p->db));
        }
        errCnt++;
      }
      nSql = 0;
      if( p->outCount ){
        output_reset(p);
        p->outCount = 0;
      }
    }else if( nSql && _all_whitespace(zSql) ){
      if( p->echoOn ) printf("%s\n", zSql);
      nSql = 0;
    }
  }
  if( nSql ){
    if( !_all_whitespace(zSql) ){
      fprintf(stderr, "Error: incomplete SQL: %s\n", zSql);
      errCnt++;
    }
    free(zSql);
  }

  free(zLine);
  return errCnt>0;
}

/*
** Return a pathname which is the user's home directory.  A
** 0 return indicates an error of some kind.

        if( in!=0 || !stdin_is_interactive ){
          sqlite3_snprintf(sizeof(zPrefix), zPrefix, 
                           "Error: near line %d:", startline);
        }else{
          sqlite3_snprintf(sizeof(zPrefix), zPrefix, "Error:");
        }
        if( zErrMsg!=0 ){
          utf8_printf(stderr, "%s %s\n", zPrefix, zErrMsg);
          sqlite3_free(zErrMsg);
          zErrMsg = 0;
        }else{
          utf8_printf(stderr, "%s %s\n", zPrefix, sqlite3_errmsg(p->db));
        }
        errCnt++;
      }else if( p->countChanges ){
        raw_printf(p->out, "changes: %3d   total_changes: %d\n",
                sqlite3_changes(p->db), sqlite3_total_changes(p->db));
      }
      nSql = 0;
      if( p->outCount ){
        output_reset(p);
        p->outCount = 0;
      }
    }else if( nSql && _all_whitespace(zSql) ){
      if( p->echoOn ) printf("%s\n", zSql);
      nSql = 0;
    }
  }
  if( nSql ){
    if( !_all_whitespace(zSql) ){
      utf8_printf(stderr, "Error: incomplete SQL: %s\n", zSql);
      errCnt++;
    }

  }
  free(zSql);
  free(zLine);
  return errCnt>0;
}
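
The new countChanges branch above prints sqlite3_changes() and sqlite3_total_changes() after each successful statement. A short self-contained illustration of what the two counters report; the table and statements are made up for the example:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t(x); INSERT INTO t VALUES(1),(2),(3);", 0,0,0);
  sqlite3_exec(db, "UPDATE t SET x=x+1;", 0, 0, 0);
  /* sqlite3_changes() counts rows touched by the most recent statement (3);
  ** sqlite3_total_changes() accumulates over the connection (3+3 = 6). */
  printf("changes: %3d   total_changes: %d\n",
         sqlite3_changes(db), sqlite3_total_changes(db));
  sqlite3_close(db);
  return 0;
}
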

/*
** Return a pathname which is the user's home directory.  A
** 0 return indicates an error of some kind.
  const char *sqliterc = sqliterc_override;
  char *zBuf = 0;
  FILE *in = NULL;

  if (sqliterc == NULL) {
    home_dir = find_home_dir();
    if( home_dir==0 ){
      fprintf(stderr, "-- warning: cannot find home directory;"
                      " cannot read ~/.sqliterc\n");
      return;
    }
    sqlite3_initialize();
    zBuf = sqlite3_mprintf("%s/.sqliterc",home_dir);
    sqliterc = zBuf;
  }
  in = fopen(sqliterc,"rb");
  if( in ){
    if( stdin_is_interactive ){
      fprintf(stderr,"-- Loading resources from %s\n",sqliterc);
    }
    process_input(p,in);
    fclose(in);
  }
  sqlite3_free(zBuf);
}

  const char *sqliterc = sqliterc_override;
  char *zBuf = 0;
  FILE *in = NULL;

  if (sqliterc == NULL) {
    home_dir = find_home_dir();
    if( home_dir==0 ){
      raw_printf(stderr, "-- warning: cannot find home directory;"
                      " cannot read ~/.sqliterc\n");
      return;
    }
    sqlite3_initialize();
    zBuf = sqlite3_mprintf("%s/.sqliterc",home_dir);
    sqliterc = zBuf;
  }
  in = fopen(sqliterc,"rb");
  if( in ){
    if( stdin_is_interactive ){
      utf8_printf(stderr,"-- Loading resources from %s\n",sqliterc);
    }
    process_input(p,in);
    fclose(in);
  }
  sqlite3_free(zBuf);
}

  "   -version             show SQLite version\n"
  "   -vfs NAME            use NAME as the default VFS\n"
#ifdef SQLITE_ENABLE_VFSTRACE
  "   -vfstrace            enable tracing of all VFS calls\n"
#endif
;
static void usage(int showDetail){
  fprintf(stderr,
      "Usage: %s [OPTIONS] FILENAME [SQL]\n"  
      "FILENAME is the name of an SQLite database. A new database is created\n"
      "if the file does not previously exist.\n", Argv0);
  if( showDetail ){
    fprintf(stderr, "OPTIONS include:\n%s", zOptions);
  }else{
    fprintf(stderr, "Use the -help option for additional information\n");
  }
  exit(1);
}

/*
** Initialize the state information in data
*/
static void main_init(ShellState *data) {
  memset(data, 0, sizeof(*data));

  data->mode = MODE_List;
  memcpy(data->colSeparator,SEP_Column, 2);
  memcpy(data->rowSeparator,SEP_Row, 2);
  data->showHeader = 0;
  data->shellFlgs = SHFLG_Lookaside;
  sqlite3_config(SQLITE_CONFIG_URI, 1);
  sqlite3_config(SQLITE_CONFIG_LOG, shellLog, data);
  sqlite3_config(SQLITE_CONFIG_MULTITHREAD);

  "   -version             show SQLite version\n"
  "   -vfs NAME            use NAME as the default VFS\n"
#ifdef SQLITE_ENABLE_VFSTRACE
  "   -vfstrace            enable tracing of all VFS calls\n"
#endif
;
static void usage(int showDetail){
  utf8_printf(stderr,
      "Usage: %s [OPTIONS] FILENAME [SQL]\n"  
      "FILENAME is the name of an SQLite database. A new database is created\n"
      "if the file does not previously exist.\n", Argv0);
  if( showDetail ){
    utf8_printf(stderr, "OPTIONS include:\n%s", zOptions);
  }else{
    raw_printf(stderr, "Use the -help option for additional information\n");
  }
  exit(1);
}

/*
** Initialize the state information in data
*/
static void main_init(ShellState *data) {
  memset(data, 0, sizeof(*data));
  data->normalMode = data->cMode = data->mode = MODE_List;
  data->autoExplain = 1;
  memcpy(data->colSeparator,SEP_Column, 2);
  memcpy(data->rowSeparator,SEP_Row, 2);
  data->showHeader = 0;
  data->shellFlgs = SHFLG_Lookaside;
  sqlite3_config(SQLITE_CONFIG_URI, 1);
  sqlite3_config(SQLITE_CONFIG_LOG, shellLog, data);
  sqlite3_config(SQLITE_CONFIG_MULTITHREAD);
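
main_init() above installs process-wide settings through sqlite3_config(), which is only accepted before the library has been initialized. A standalone sketch of the same calls; the log callback below is a stand-in for illustration, not the shell's shellLog():

#include <stdio.h>
#include <sqlite3.h>

/* Hypothetical log callback; matches the SQLITE_CONFIG_LOG signature. */
static void logCallback(void *pArg, int iErrCode, const char *zMsg){
  (void)pArg;
  fprintf(stderr, "sqlite log (%d): %s\n", iErrCode, zMsg);
}

int main(void){
  sqlite3 *db;
  /* Global configuration must precede sqlite3_initialize() or the first
  ** use of the library. */
  sqlite3_config(SQLITE_CONFIG_URI, 1);
  sqlite3_config(SQLITE_CONFIG_LOG, logCallback, 0);
  sqlite3_config(SQLITE_CONFIG_MULTITHREAD);
  if( sqlite3_open(":memory:", &db)==SQLITE_OK ){
    sqlite3_close(db);
  }
  return 0;
}
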
/*
** Get the argument to an --option.  Throw an error and die if no argument
** is available.
*/
static char *cmdline_option_value(int argc, char **argv, int i){
  if( i==argc ){
    fprintf(stderr, "%s: Error: missing argument to %s\n",
            argv[0], argv[argc-1]);
    exit(1);
  }
  return argv[i];
}

int SQLITE_CDECL main(int argc, char **argv){
  char *zErrMsg = 0;
  ShellState data;
  const char *zInitFile = 0;
  int i;
  int rc = 0;
  int warnInmemoryDb = 0;
  int readStdin = 1;
  int nCmd = 0;
  char **azCmd = 0;

#if USE_SYSTEM_SQLITE+0!=1
  if( strcmp(sqlite3_sourceid(),SQLITE_SOURCE_ID)!=0 ){
    fprintf(stderr, "SQLite header and source version mismatch\n%s\n%s\n",
            sqlite3_sourceid(), SQLITE_SOURCE_ID);
    exit(1);
  }
#endif
  setBinaryMode(stdin);
  setvbuf(stderr, 0, _IONBF, 0); /* Make sure stderr is unbuffered */
  Argv0 = argv[0];
  main_init(&data);
  stdin_is_interactive = isatty(0);


  /* Make sure we have a valid signal handler early, before anything
  ** else is done.
  */
#ifdef SIGINT
  signal(SIGINT, interrupt_handler);
#endif

/*
** Get the argument to an --option.  Throw an error and die if no argument
** is available.
*/
static char *cmdline_option_value(int argc, char **argv, int i){
  if( i==argc ){
    utf8_printf(stderr, "%s: Error: missing argument to %s\n",
            argv[0], argv[argc-1]);
    exit(1);
  }
  return argv[i];
}

int SQLITE_CDECL main(int argc, char **argv){
  char *zErrMsg = 0;
  ShellState data;
  const char *zInitFile = 0;
  int i;
  int rc = 0;
  int warnInmemoryDb = 0;
  int readStdin = 1;
  int nCmd = 0;
  char **azCmd = 0;

#if USE_SYSTEM_SQLITE+0!=1
  if( strcmp(sqlite3_sourceid(),SQLITE_SOURCE_ID)!=0 ){
    utf8_printf(stderr, "SQLite header and source version mismatch\n%s\n%s\n",
            sqlite3_sourceid(), SQLITE_SOURCE_ID);
    exit(1);
  }
#endif
  setBinaryMode(stdin);
  setvbuf(stderr, 0, _IONBF, 0); /* Make sure stderr is unbuffered */
  Argv0 = argv[0];
  main_init(&data);
  stdin_is_interactive = isatty(0);
  stdout_is_console = isatty(1);

  /* Make sure we have a valid signal handler early, before anything
  ** else is done.
  */
#ifdef SIGINT
  signal(SIGINT, interrupt_handler);
#endif
      }else{
        /* Excess arguments are interpreted as SQL (or dot-commands) and
        ** mean that nothing is read from stdin */
        readStdin = 0;
        nCmd++;
        azCmd = realloc(azCmd, sizeof(azCmd[0])*nCmd);
        if( azCmd==0 ){
          fprintf(stderr, "out of memory\n");
          exit(1);
        }
        azCmd[nCmd-1] = z;
      }
    }
    if( z[1]=='-' ) z++;
    if( strcmp(z,"-separator")==0

      }else{
        /* Excess arguments are interpreted as SQL (or dot-commands) and
        ** mean that nothing is read from stdin */
        readStdin = 0;
        nCmd++;
        azCmd = realloc(azCmd, sizeof(azCmd[0])*nCmd);
        if( azCmd==0 ){
          raw_printf(stderr, "out of memory\n");
          exit(1);
        }
        azCmd[nCmd-1] = z;
      }
    }
    if( z[1]=='-' ) z++;
    if( strcmp(z,"-separator")==0
      if( n<1 ) n = 1;
      sqlite3_config(SQLITE_CONFIG_SCRATCH, malloc(n*sz+1), sz, n);
      data.shellFlgs |= SHFLG_Scratch;
    }else if( strcmp(z,"-pagecache")==0 ){
      int n, sz;
      sz = (int)integerValue(cmdline_option_value(argc,argv,++i));
      if( sz>70000 ) sz = 70000;
      if( sz<800 ) sz = 800;
      n = (int)integerValue(cmdline_option_value(argc,argv,++i));
      if( n<10 ) n = 10;
      sqlite3_config(SQLITE_CONFIG_PAGECACHE, malloc(n*sz+1), sz, n);

      data.shellFlgs |= SHFLG_Pagecache;
    }else if( strcmp(z,"-lookaside")==0 ){
      int n, sz;
      sz = (int)integerValue(cmdline_option_value(argc,argv,++i));
      if( sz<0 ) sz = 0;
      n = (int)integerValue(cmdline_option_value(argc,argv,++i));
      if( n<0 ) n = 0;

      if( n<1 ) n = 1;
      sqlite3_config(SQLITE_CONFIG_SCRATCH, malloc(n*sz+1), sz, n);
      data.shellFlgs |= SHFLG_Scratch;
    }else if( strcmp(z,"-pagecache")==0 ){
      int n, sz;
      sz = (int)integerValue(cmdline_option_value(argc,argv,++i));
      if( sz>70000 ) sz = 70000;
      if( sz<0 ) sz = 0;
      n = (int)integerValue(cmdline_option_value(argc,argv,++i));

      sqlite3_config(SQLITE_CONFIG_PAGECACHE,
                    (n>0 && sz>0) ? malloc(n*sz) : 0, sz, n);
      data.shellFlgs |= SHFLG_Pagecache;
    }else if( strcmp(z,"-lookaside")==0 ){
      int n, sz;
      sz = (int)integerValue(cmdline_option_value(argc,argv,++i));
      if( sz<0 ) sz = 0;
      n = (int)integerValue(cmdline_option_value(argc,argv,++i));
      if( n<0 ) n = 0;
      sqlite3_int64 sz = integerValue(cmdline_option_value(argc,argv,++i));
      sqlite3_config(SQLITE_CONFIG_MMAP_SIZE, sz, sz);
    }else if( strcmp(z,"-vfs")==0 ){
      sqlite3_vfs *pVfs = sqlite3_vfs_find(cmdline_option_value(argc,argv,++i));
      if( pVfs ){
        sqlite3_vfs_register(pVfs, 1);
      }else{
        fprintf(stderr, "no such VFS: \"%s\"\n", argv[i]);
        exit(1);
      }
    }
  }
  if( data.zDbFilename==0 ){
#ifndef SQLITE_OMIT_MEMORYDB
    data.zDbFilename = ":memory:";
    warnInmemoryDb = argc==1;
#else
    fprintf(stderr,"%s: Error: no database filename specified\n", Argv0);
    return 1;
#endif
  }
  data.out = stdout;

  /* Go ahead and open the database file if it already exists.  If the
  ** file does not exist, delay opening it.  This prevents empty database

      sqlite3_int64 sz = integerValue(cmdline_option_value(argc,argv,++i));
      sqlite3_config(SQLITE_CONFIG_MMAP_SIZE, sz, sz);
    }else if( strcmp(z,"-vfs")==0 ){
      sqlite3_vfs *pVfs = sqlite3_vfs_find(cmdline_option_value(argc,argv,++i));
      if( pVfs ){
        sqlite3_vfs_register(pVfs, 1);
      }else{
        utf8_printf(stderr, "no such VFS: \"%s\"\n", argv[i]);
        exit(1);
      }
    }
  }
  if( data.zDbFilename==0 ){
#ifndef SQLITE_OMIT_MEMORYDB
    data.zDbFilename = ":memory:";
    warnInmemoryDb = argc==1;
#else
    utf8_printf(stderr,"%s: Error: no database filename specified\n", Argv0);
    return 1;
#endif
  }
  data.out = stdout;

  /* Go ahead and open the database file if it already exists.  If the
  ** file does not exist, delay opening it.  This prevents empty database
      if( z[0]=='.' ){
        rc = do_meta_command(z, &data);
        if( rc && bail_on_error ) return rc==2 ? 0 : rc;
      }else{
        open_db(&data, 0);
        rc = shell_exec(data.db, z, shell_callback, &data, &zErrMsg);
        if( zErrMsg!=0 ){
          fprintf(stderr,"Error: %s\n", zErrMsg);
          if( bail_on_error ) return rc!=0 ? rc : 1;
        }else if( rc!=0 ){
          fprintf(stderr,"Error: unable to process SQL \"%s\"\n", z);
          if( bail_on_error ) return rc;
        }
      }
    }else{
      fprintf(stderr,"%s: Error: unknown option: %s\n", Argv0, z);
      fprintf(stderr,"Use -help for a list of options.\n");
      return 1;
    }

  }

  if( !readStdin ){
    /* Run all arguments that do not begin with '-' as if they were separate
    ** command-line inputs, except for the argToSkip argument which contains
    ** the database filename.
    */
    for(i=0; i<nCmd; i++){
      if( azCmd[i][0]=='.' ){
        rc = do_meta_command(azCmd[i], &data);
        if( rc ) return rc==2 ? 0 : rc;
      }else{
        open_db(&data, 0);
        rc = shell_exec(data.db, azCmd[i], shell_callback, &data, &zErrMsg);
        if( zErrMsg!=0 ){
          fprintf(stderr,"Error: %s\n", zErrMsg);
          return rc!=0 ? rc : 1;
        }else if( rc!=0 ){
          fprintf(stderr,"Error: unable to process SQL: %s\n", azCmd[i]);
          return rc;
        }
      }
    }
    free(azCmd);
  }else{
    /* Run commands received from standard input


      if( z[0]=='.' ){
        rc = do_meta_command(z, &data);
        if( rc && bail_on_error ) return rc==2 ? 0 : rc;
      }else{
        open_db(&data, 0);
        rc = shell_exec(data.db, z, shell_callback, &data, &zErrMsg);
        if( zErrMsg!=0 ){
          utf8_printf(stderr,"Error: %s\n", zErrMsg);
          if( bail_on_error ) return rc!=0 ? rc : 1;
        }else if( rc!=0 ){
          utf8_printf(stderr,"Error: unable to process SQL \"%s\"\n", z);
          if( bail_on_error ) return rc;
        }
      }
    }else{
      utf8_printf(stderr,"%s: Error: unknown option: %s\n", Argv0, z);
      raw_printf(stderr,"Use -help for a list of options.\n");
      return 1;
    }
    data.cMode = data.mode;
  }

  if( !readStdin ){
    /* Run all arguments that do not begin with '-' as if they were separate
    ** command-line inputs, except for the argToSkip argument which contains
    ** the database filename.
    */
    for(i=0; i<nCmd; i++){
      if( azCmd[i][0]=='.' ){
        rc = do_meta_command(azCmd[i], &data);
        if( rc ) return rc==2 ? 0 : rc;
      }else{
        open_db(&data, 0);
        rc = shell_exec(data.db, azCmd[i], shell_callback, &data, &zErrMsg);
        if( zErrMsg!=0 ){
          utf8_printf(stderr,"Error: %s\n", zErrMsg);
          return rc!=0 ? rc : 1;
        }else if( rc!=0 ){
          utf8_printf(stderr,"Error: unable to process SQL: %s\n", azCmd[i]);
          return rc;
        }
      }
    }
    free(azCmd);
  }else{
    /* Run commands received from standard input
Changes to src/sitemap.c.
    @   <li>%z(href("%R/hash-collisions"))Collisions on SHA1 hash
    @       prefixes</a></li>
    if( g.perm.Admin ){
      @   <li>%z(href("%R/urllist"))List of URLs used to access
      @       this repository</a></li>
    }
    @   <li>%z(href("%R/bloblist"))List of Artifacts</a></li>

    @   </ul>
    @ </li>
  }
  @ <li>On-line Documentation
  @   <ul>
  @   <li>%z(href("%R/help"))List of All Commands and Web Pages</a></li>
  @   <li>%z(href("%R/test-all-help"))All "help" text on a single page</a></li>

    @   <li>%z(href("%R/hash-collisions"))Collisions on SHA1 hash
    @       prefixes</a></li>
    if( g.perm.Admin ){
      @   <li>%z(href("%R/urllist"))List of URLs used to access
      @       this repository</a></li>
    }
    @   <li>%z(href("%R/bloblist"))List of Artifacts</a></li>
    @   <li>%z(href("%R/timewarps"))List of "Timewarp" Check-ins</a></li>
    @   </ul>
    @ </li>
  }
  @ <li>On-line Documentation
  @   <ul>
  @   <li>%z(href("%R/help"))List of All Commands and Web Pages</a></li>
  @   <li>%z(href("%R/test-all-help"))All "help" text on a single page</a></li>
    @   </ul></li>
  }
  @ <li>Test Pages
  @   <ul>
  if( g.perm.Admin || db_get_boolean("test_env_enable",0) ){
    @   <li>%z(href("%R/test_env"))CGI Environment Test</a></li>
  }
  if( g.perm.Read && g.perm.Hyperlink ){
    @   <li>%z(href("%R/test_timewarps"))List of "Timewarp" Check-ins</a></li>
  }
  if( g.perm.Read ){
    @   <li>%z(href("%R/test-rename-list"))List of file renames</a></li>
  }
  @   <li>%z(href("%R/hash-color-test"))Page to experiment with the automatic
  @       colors assigned to branch names</a>
  @   <li>%z(href("%R/test-captcha"))Random ASCII-art Captcha image</a></li>
  @   </ul></li>
  @ </ul></li>
  style_footer();
}

    @   </ul></li>
  }
  @ <li>Test Pages
  @   <ul>
  if( g.perm.Admin || db_get_boolean("test_env_enable",0) ){
    @   <li>%z(href("%R/test_env"))CGI Environment Test</a></li>
  }



  if( g.perm.Read ){
    @   <li>%z(href("%R/test-rename-list"))List of file renames</a></li>
  }
  @   <li>%z(href("%R/hash-color-test"))Page to experiment with the automatic
  @       colors assigned to branch names</a>
  @   <li>%z(href("%R/test-captcha"))Random ASCII-art Captcha image</a></li>
  @   </ul></li>
  @ </ul></li>
  style_footer();
}
Changes to src/skins.c.
** attributes of the skin that cannot be easily specified using CSS
** or that need to be known on the server-side.
**
** The following array holds the value for all known skin details.
*/
static struct SkinDetail {
  const char *zName;      /* Name of the detail */
  char *zValue;           /* Value of the detail */
} aSkinDetail[] = {
  { "timeline-arrowheads",        "1"  },
  { "timeline-circle-nodes",      "0"  },
  { "timeline-color-graph-lines", "0"  },
  { "white-foreground",           "0"  },
};







|







71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
** attributes of the skin that cannot be easily specified using CSS
** or that need to be known on the server-side.
**
** The following array holds the value for all known skin details.
*/
static struct SkinDetail {
  const char *zName;      /* Name of the detail */
  const char *zValue;     /* Value of the detail */
} aSkinDetail[] = {
  { "timeline-arrowheads",        "1"  },
  { "timeline-circle-nodes",      "0"  },
  { "timeline-color-graph-lines", "0"  },
  { "white-foreground",           "0"  },
};
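Illustrative sketch only, not code from this check-in: a detail from the aSkinDetail[] array above would typically be looked up by name with a linear scan, along the lines of the hypothetical helper below.

    #include <string.h>

    /* Hypothetical helper: return the value of the named skin detail,
    ** or NULL if the name is not in aSkinDetail[]. */
    static const char *skin_detail_value(const char *zName){
      unsigned int i;
      for(i=0; i<sizeof(aSkinDetail)/sizeof(aSkinDetail[0]); i++){
        if( strcmp(aSkinDetail[i].zName, zName)==0 ) return aSkinDetail[i].zValue;
      }
      return 0;
    }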

Changes to src/sqlcmd.c.
[src/sqlcmd.c lines 150-170 → lines 150-174; two lines changed, four lines added]

-** Usage: %fossil sqlite3 ?DATABASE? ?OPTIONS?
+** Usage: %fossil sqlite3 ?FOSSIL_OPTS? ?DATABASE? ?SHELL_OPTS?

-** Run the standalone sqlite3 command-line shell on DATABASE with OPTIONS.
+** Run the standalone sqlite3 command-line shell on DATABASE with SHELL_OPTS.

+** Fossil Options:
+**
+**    --no-repository           Skip opening the repository database.
+**

New text of the region:
  g.db = db;
  return SQLITE_OK;
}

/*
** COMMAND: sqlite3
**
** Usage: %fossil sqlite3 ?FOSSIL_OPTS? ?DATABASE? ?SHELL_OPTS?
**
** Run the standalone sqlite3 command-line shell on DATABASE with SHELL_OPTS.
** If DATABASE is omitted, then the repository that serves the working
** directory is opened.  See https://www.sqlite.org/cli.html for additional
** information.
**
** Fossil Options:
**
**    --no-repository           Skip opening the repository database.
**
** WARNING:  Careless use of this command can corrupt a Fossil repository
** in ways that are unrecoverable.  Be sure you know what you are doing before
** running any SQL commands that modifies the repository database.
**
** The following extensions to the usual SQLite commands are provided:
**
**    content(X)                Return the contenxt of artifact X.  X can be a
[src/sqlcmd.c lines 191-217 → lines 195-235]

cmd_sqlite3() now parses --no-repository via find_option() and only calls
db_find_and_open_repository() when that option is absent.  The direct calls

-  db_close(1);
-  g.db = 0;
-  g.zMainDbType = 0;
-  g.repositoryOpen = 0;
-  g.localOpen = 0;

are replaced by the new fossil_close() helper, which is added, together with
its comment, at the end of the region.

New text of the region:
**
** Usage example for files_of_checkin:
**
**     CREATE VIRTUAL TABLE temp.foci USING files_of_checkin;
**     SELECT * FROM foci WHERE checkinID=symbolic_name_to_rid('trunk');
*/
void cmd_sqlite3(void){
  int noRepository;
  extern int sqlite3_shell(int, char**);
  noRepository = find_option("no-repository", 0, 0)!=0;
  if( !noRepository ){
    db_find_and_open_repository(OPEN_ANY_SCHEMA, 0);
  }
  fossil_close(1, noRepository);
  sqlite3_shutdown();
  sqlite3_shell(g.argc-1, g.argv+1);
  sqlite3_cancel_auto_extension((void(*)(void))sqlcmd_autoinit);
  fossil_close(0, noRepository);
}

/*
** This routine is called by the patched sqlite3 command-line shell in order
** to load the name and database connection for the open Fossil database.
*/
void fossil_open(const char **pzRepoName){
  sqlite3_auto_extension((void(*)(void))sqlcmd_autoinit);
  *pzRepoName = g.zRepositoryName;
}

/*
** This routine closes the Fossil databases and/or invalidates the global
** state variables that keep track of them.
*/
void fossil_close(int bDb, int noRepository){
  if( bDb ) db_close(1);
  if( noRepository ) g.zRepositoryName = 0;
  g.db = 0;
  g.zMainDbType = 0;
  g.repositoryOpen = 0;
  g.localOpen = 0;
}
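For illustration only (not part of this check-in): the auto-extension hook used by fossil_open() above is public SQLite API, and the same pattern can be sketched in a self-contained program like this; the callback name is a placeholder.

    #include <sqlite3.h>

    /* Runs automatically for every new connection once registered. */
    static int my_autoinit(sqlite3 *db, const char **pzErrMsg,
                           const struct sqlite3_api_routines *pApi){
      (void)db; (void)pzErrMsg; (void)pApi;
      /* e.g. register application-defined SQL functions on db here */
      return SQLITE_OK;
    }

    int main(void){
      sqlite3 *db;
      sqlite3_auto_extension((void(*)(void))my_autoinit);   /* register hook    */
      sqlite3_open(":memory:", &db);                        /* hook runs here   */
      sqlite3_close(db);
      sqlite3_cancel_auto_extension((void(*)(void))my_autoinit);
      return 0;
    }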
Changes to src/sqlite3.c.

more than 10,000 changes

Changes to src/sqlite3.h.
[src/sqlite3.h lines 107-134; four lines changed]

-#define SQLITE_VERSION        "3.8.11"
-#define SQLITE_VERSION_NUMBER 3008011
-#define SQLITE_SOURCE_ID      "2015-07-27 13:49:41 b8e92227a469de677a66da62e4361f099c0b79d0"
+#define SQLITE_VERSION        "3.11.0"
+#define SQLITE_VERSION_NUMBER 3011000
+#define SQLITE_SOURCE_ID      "2016-02-15 17:29:24 3d862f207e3adc00f78066799ac5a8c282430a5f"

-** the header, and thus insure that the application is
+** the header, and thus ensure that the application is

New text of the region:
** string contains the date and time of the check-in (UTC) and an SHA1
** hash of the entire source tree.
**
** See also: [sqlite3_libversion()],
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
#define SQLITE_VERSION        "3.11.0"
#define SQLITE_VERSION_NUMBER 3011000
#define SQLITE_SOURCE_ID      "2016-02-15 17:29:24 3d862f207e3adc00f78066799ac5a8c282430a5f"

/*
** CAPI3REF: Run-Time Library Version Numbers
** KEYWORDS: sqlite3_version, sqlite3_sourceid
**
** These interfaces provide the same information as the [SQLITE_VERSION],
** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros
** but are associated with the library instead of the header file.  ^(Cautious
** programmers might include assert() statements in their application to
** verify that values returned by these interfaces match the macros in
** the header, and thus ensure that the application is
** compiled with matching library and header files.
**
** <blockquote><pre>
** assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
** assert( strcmp(sqlite3_sourceid(),SQLITE_SOURCE_ID)==0 );
** assert( strcmp(sqlite3_libversion(),SQLITE_VERSION)==0 );
** </pre></blockquote>)^
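For illustration only (not part of the diff): a tiny program exercising the consistency checks recommended in the comment above.

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>
    #include <sqlite3.h>

    int main(void){
      /* Header/library consistency checks suggested by the documentation above. */
      assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
      assert( strcmp(sqlite3_sourceid(), SQLITE_SOURCE_ID)==0 );
      assert( strcmp(sqlite3_libversion(), SQLITE_VERSION)==0 );
      printf("SQLite %s (%s)\n", sqlite3_libversion(), sqlite3_sourceid());
      return 0;
    }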
[src/sqlite3.h lines 343-357; one line changed]

-** of sqlite3_exec() after the error message string is no longer needed.
+** sqlite3_exec() after the error message string is no longer needed.

New text of the region:
** ^If an error occurs while evaluating the SQL statements passed into
** sqlite3_exec(), then execution of the current statement stops and
** subsequent statements are skipped.  ^If the 5th parameter to sqlite3_exec()
** is not NULL then any error message is written into memory obtained
** from [sqlite3_malloc()] and passed back through the 5th parameter.
** To avoid memory leaks, the application should invoke [sqlite3_free()]
** on error message strings returned through the 5th parameter of
** sqlite3_exec() after the error message string is no longer needed.
** ^If the 5th parameter to sqlite3_exec() is not NULL and no errors
** occur, then sqlite3_exec() sets the pointer in its 5th parameter to
** NULL before returning.
**
** ^If an sqlite3_exec() callback returns non-zero, the sqlite3_exec()
** routine returns SQLITE_ABORT without invoking the callback again and
** without running any subsequent SQL statements.
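A short illustrative sketch (not from the diff) of the error-message discipline described above; the SQL text is whatever the caller supplies.

    #include <stdio.h>
    #include <sqlite3.h>

    /* Run one SQL string and report any error, freeing the message as required. */
    static int run_sql(sqlite3 *db, const char *zSql){
      char *zErrMsg = 0;
      int rc = sqlite3_exec(db, zSql, 0, 0, &zErrMsg);
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "SQL error: %s\n", zErrMsg ? zErrMsg : "unknown");
        sqlite3_free(zErrMsg);   /* required to avoid a memory leak */
      }
      return rc;
    }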
[src/sqlite3.h lines 370-384; one line changed]

-** <li> The application must insure that the 1st parameter to sqlite3_exec()
+** <li> The application must ensure that the 1st parameter to sqlite3_exec()

New text of the region:
** to an empty string, or a pointer that contains only whitespace and/or 
** SQL comments, then no SQL statements are evaluated and the database
** is not changed.
**
** Restrictions:
**
** <ul>
** <li> The application must ensure that the 1st parameter to sqlite3_exec()
**      is a valid and open [database connection].
** <li> The application must not close the [database connection] specified by
**      the 1st parameter to sqlite3_exec() while sqlite3_exec() is running.
** <li> The application must not modify the SQL statement text passed into
**      the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running.
** </ul>
*/
[src/sqlite3.h lines 473-486 → lines 473-488; two lines added]

+#define SQLITE_IOERR_VNODE             (SQLITE_IOERR | (27<<8))
+#define SQLITE_IOERR_AUTH              (SQLITE_IOERR | (28<<8))

New text of the region:
#define SQLITE_IOERR_SHMLOCK           (SQLITE_IOERR | (20<<8))
#define SQLITE_IOERR_SHMMAP            (SQLITE_IOERR | (21<<8))
#define SQLITE_IOERR_SEEK              (SQLITE_IOERR | (22<<8))
#define SQLITE_IOERR_DELETE_NOENT      (SQLITE_IOERR | (23<<8))
#define SQLITE_IOERR_MMAP              (SQLITE_IOERR | (24<<8))
#define SQLITE_IOERR_GETTEMPPATH       (SQLITE_IOERR | (25<<8))
#define SQLITE_IOERR_CONVPATH          (SQLITE_IOERR | (26<<8))
#define SQLITE_IOERR_VNODE             (SQLITE_IOERR | (27<<8))
#define SQLITE_IOERR_AUTH              (SQLITE_IOERR | (28<<8))
#define SQLITE_LOCKED_SHAREDCACHE      (SQLITE_LOCKED |  (1<<8))
#define SQLITE_BUSY_RECOVERY           (SQLITE_BUSY   |  (1<<8))
#define SQLITE_BUSY_SNAPSHOT           (SQLITE_BUSY   |  (2<<8))
#define SQLITE_CANTOPEN_NOTEMPDIR      (SQLITE_CANTOPEN | (1<<8))
#define SQLITE_CANTOPEN_ISDIR          (SQLITE_CANTOPEN | (2<<8))
#define SQLITE_CANTOPEN_FULLPATH       (SQLITE_CANTOPEN | (3<<8))
#define SQLITE_CANTOPEN_CONVPATH       (SQLITE_CANTOPEN | (4<<8))
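Illustrative sketch (not part of the diff): extended codes such as these are only returned by sqlite3_step() and friends once extended result codes are enabled on the connection.

    #include <stdio.h>
    #include <sqlite3.h>

    /* Call once after opening so that API calls return extended result codes. */
    static void enable_extended(sqlite3 *db){
      sqlite3_extended_result_codes(db, 1);
    }

    /* Report the primary and extended result code of the last failed operation. */
    static void report_error(sqlite3 *db){
      int rc  = sqlite3_errcode(db);           /* e.g. SQLITE_IOERR        */
      int xrc = sqlite3_extended_errcode(db);  /* e.g. SQLITE_IOERR_VNODE  */
      fprintf(stderr, "error %d (extended %d): %s\n", rc, xrc, sqlite3_errmsg(db));
    }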
[src/sqlite3.h lines 788-803 → lines 790-810; two lines changed, five lines added]

-** connection.  See the [sqlite3_file_control()] documentation for
-** additional information.
+** connection.  See also [SQLITE_FCNTL_JOURNAL_POINTER].
+**
+** <li>[[SQLITE_FCNTL_JOURNAL_POINTER]]
+** The [SQLITE_FCNTL_JOURNAL_POINTER] opcode is used to obtain a pointer
+** to the [sqlite3_file] object associated with the journal file (either
+** the [rollback journal] or the [write-ahead log]) for a particular database
+** connection.  See also [SQLITE_FCNTL_FILE_POINTER].

New text of the region:
** for the nominated database. Allocating database file space in large
** chunks (say 1MB at a time), may reduce file-system fragmentation and
** improve performance on some systems.
**
** <li>[[SQLITE_FCNTL_FILE_POINTER]]
** The [SQLITE_FCNTL_FILE_POINTER] opcode is used to obtain a pointer
** to the [sqlite3_file] object associated with a particular database
** connection.  See also [SQLITE_FCNTL_JOURNAL_POINTER].
**
** <li>[[SQLITE_FCNTL_JOURNAL_POINTER]]
** The [SQLITE_FCNTL_JOURNAL_POINTER] opcode is used to obtain a pointer
** to the [sqlite3_file] object associated with the journal file (either
** the [rollback journal] or the [write-ahead log]) for a particular database
** connection.  See also [SQLITE_FCNTL_FILE_POINTER].
**
** <li>[[SQLITE_FCNTL_SYNC_OMITTED]]
** No longer in use.
**
** <li>[[SQLITE_FCNTL_SYNC]]
** The [SQLITE_FCNTL_SYNC] opcode is generated internally by SQLite and
** sent to the VFS immediately before the xSync method is invoked on a
[src/sqlite3.h lines 875-888 → lines 882-904; nine lines added — the new [[SQLITE_FCNTL_VFS_POINTER]] paragraph shown below]

New text of the region:
** [sqlite3_malloc()] and the result is stored in the char* variable
** that the fourth parameter of [sqlite3_file_control()] points to.
** The caller is responsible for freeing the memory when done.  As with
** all file-control actions, there is no guarantee that this will actually
** do anything.  Callers should initialize the char* variable to a NULL
** pointer in case this file-control is not implemented.  This file-control
** is intended for diagnostic use only.
**
** <li>[[SQLITE_FCNTL_VFS_POINTER]]
** ^The [SQLITE_FCNTL_VFS_POINTER] opcode finds a pointer to the top-level
** [VFSes] currently in use.  ^(The argument X in
** sqlite3_file_control(db,SQLITE_FCNTL_VFS_POINTER,X) must be
** of type "[sqlite3_vfs] **".  This opcodes will set *X
** to a pointer to the top-level VFS.)^
** ^When there are multiple VFS shims in the stack, this opcode finds the
** upper-most shim only.
**
** <li>[[SQLITE_FCNTL_PRAGMA]]
** ^Whenever a [PRAGMA] statement is parsed, an [SQLITE_FCNTL_PRAGMA] 
** file control is sent to the open [sqlite3_file] object corresponding
** to the database file to which the pragma statement refers. ^The argument
** to the [SQLITE_FCNTL_PRAGMA] file control is an array of
** pointers to strings (char**) in which the second element of the array
[src/sqlite3.h lines 994-1007 → lines 1010-1025; two lines added]

+#define SQLITE_FCNTL_VFS_POINTER            27
+#define SQLITE_FCNTL_JOURNAL_POINTER        28

New text of the region:
#define SQLITE_FCNTL_HAS_MOVED              20
#define SQLITE_FCNTL_SYNC                   21
#define SQLITE_FCNTL_COMMIT_PHASETWO        22
#define SQLITE_FCNTL_WIN32_SET_HANDLE       23
#define SQLITE_FCNTL_WAL_BLOCK              24
#define SQLITE_FCNTL_ZIPVFS                 25
#define SQLITE_FCNTL_RBU                    26
#define SQLITE_FCNTL_VFS_POINTER            27
#define SQLITE_FCNTL_JOURNAL_POINTER        28

/* deprecated names */
#define SQLITE_GET_LOCKPROXYFILE      SQLITE_FCNTL_GET_LOCKPROXYFILE
#define SQLITE_SET_LOCKPROXYFILE      SQLITE_FCNTL_SET_LOCKPROXYFILE
#define SQLITE_LAST_ERRNO             SQLITE_FCNTL_LAST_ERRNO
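A hedged sketch (not from the diff) of using the new opcodes through sqlite3_file_control(); the "main" schema name and the printing are illustrative.

    #include <stdio.h>
    #include <sqlite3.h>

    /* Print the name of the top-level VFS and the size of the main database file. */
    static void show_vfs_and_size(sqlite3 *db){
      sqlite3_vfs *pVfs = 0;
      sqlite3_file *pFile = 0;

      if( sqlite3_file_control(db, "main", SQLITE_FCNTL_VFS_POINTER, &pVfs)==SQLITE_OK
       && pVfs ){
        printf("top-level VFS: %s\n", pVfs->zName);
      }
      if( sqlite3_file_control(db, "main", SQLITE_FCNTL_FILE_POINTER, &pFile)==SQLITE_OK
       && pFile && pFile->pMethods ){
        sqlite3_int64 sz = 0;
        pFile->pMethods->xFileSize(pFile, &sz);
        printf("main database file: %lld bytes\n", (long long)sz);
      }
    }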


[src/sqlite3.h lines 1362-1378 → lines 1380-1398; three lines changed, two lines added]

-** The sqlite3_config() interface is not threadsafe.  The application
-** must insure that no other SQLite interfaces are invoked by other
-** threads while sqlite3_config() is running.  Furthermore, sqlite3_config()
+** <b>The sqlite3_config() interface is not threadsafe. The application
+** must ensure that no other SQLite interfaces are invoked by other
+** threads while sqlite3_config() is running.</b>
+**
+** The sqlite3_config() interface

New text of the region:
**
** The sqlite3_config() interface is used to make global configuration
** changes to SQLite in order to tune SQLite to the specific needs of
** the application.  The default configuration is recommended for most
** applications and so this routine is usually not necessary.  It is
** provided to support rare applications with unusual needs.
**
** <b>The sqlite3_config() interface is not threadsafe. The application
** must ensure that no other SQLite interfaces are invoked by other
** threads while sqlite3_config() is running.</b>
**
** The sqlite3_config() interface
** may only be invoked prior to library initialization using
** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()].
** ^If sqlite3_config() is called after [sqlite3_initialize()] and before
** [sqlite3_shutdown()] then it will return SQLITE_MISUSE.
** Note, however, that ^sqlite3_config() can be called as part of the
** implementation of an application-defined [sqlite3_os_init()].
**
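For illustration only (not part of the diff): a sketch of the required call ordering; the serialized threading mode chosen here is just an example.

    #include <sqlite3.h>

    int main(void){
      /* Global configuration must happen before sqlite3_initialize()
      ** (or after sqlite3_shutdown()); otherwise SQLITE_MISUSE is returned. */
      sqlite3_config(SQLITE_CONFIG_SERIALIZED);
      sqlite3_initialize();

      /* ... use the library ... */

      sqlite3_shutdown();
      return 0;
    }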
[src/sqlite3.h lines 1591-1627 → lines 1611-1652; the SQLITE_CONFIG_PAGECACHE description is rewritten]

The old description read:

-** <dd> ^The SQLITE_CONFIG_PAGECACHE option specifies a static memory buffer
-** that SQLite can use for the database page cache with the default page
-** cache implementation.  
-** This configuration should not be used if an application-define page
-** cache implementation is loaded using the [SQLITE_CONFIG_PCACHE2]
-** configuration option.
-** ^There are three arguments to SQLITE_CONFIG_PAGECACHE: A pointer to
-** 8-byte aligned
-** memory, the size of each page buffer (sz), and the number of pages (N).
-** The sz argument should be the size of the largest database page
-** (a power of two between 512 and 65536) plus some extra bytes for each
-** page header.  ^The number of extra bytes needed by the page header
-** can be determined using the [SQLITE_CONFIG_PCACHE_HDRSZ] option 
-** to [sqlite3_config()].
-** ^It is harmless, apart from the wasted memory,
-** for the sz parameter to be larger than necessary.  The first
-** argument should pointer to an 8-byte aligned block of memory that
-** is at least sz*N bytes of memory, otherwise subsequent behavior is
-** undefined.
-** ^SQLite will use the memory provided by the first argument to satisfy its
-** memory needs for the first N pages that it adds to cache.  ^If additional
-** page cache memory is needed beyond what is provided by this option, then
-** SQLite goes to [sqlite3_malloc()] for the additional storage space.</dd>

New text of the region:
** SQLITE_CONFIG_SCRATCH, SQLite avoids unnecessary large
** [sqlite3_malloc|heap allocations].
** This can help [Robson proof|prevent memory allocation failures] due to heap
** fragmentation in low-memory embedded systems.
** </dd>
**
** [[SQLITE_CONFIG_PAGECACHE]] <dt>SQLITE_CONFIG_PAGECACHE</dt>
** <dd> ^The SQLITE_CONFIG_PAGECACHE option specifies a memory pool
** that SQLite can use for the database page cache with the default page
** cache implementation.  
** This configuration option is a no-op if an application-define page
** cache implementation is loaded using the [SQLITE_CONFIG_PCACHE2].
** ^There are three arguments to SQLITE_CONFIG_PAGECACHE: A pointer to
** 8-byte aligned memory (pMem), the size of each page cache line (sz),
** and the number of cache lines (N).
** The sz argument should be the size of the largest database page
** (a power of two between 512 and 65536) plus some extra bytes for each
** page header.  ^The number of extra bytes needed by the page header
** can be determined using [SQLITE_CONFIG_PCACHE_HDRSZ].
** ^It is harmless, apart from the wasted memory,
** for the sz parameter to be larger than necessary.  The pMem
** argument must be either a NULL pointer or a pointer to an 8-byte
** aligned block of memory of at least sz*N bytes, otherwise
** subsequent behavior is undefined.
** ^When pMem is not NULL, SQLite will strive to use the memory provided
** to satisfy page cache needs, falling back to [sqlite3_malloc()] if
** a page cache line is larger than sz bytes or if all of the pMem buffer
** is exhausted.
** ^If pMem is NULL and N is non-zero, then each database connection
** does an initial bulk allocation for page cache memory
** from [sqlite3_malloc()] sufficient for N cache lines if N is positive or
** of -1024*N bytes if N is negative, . ^If additional
** page cache memory is needed beyond what is provided by the initial
** allocation, then SQLite goes to [sqlite3_malloc()] separately for each
** additional cache line. </dd>
**
** [[SQLITE_CONFIG_HEAP]] <dt>SQLITE_CONFIG_HEAP</dt>
** <dd> ^The SQLITE_CONFIG_HEAP option specifies a static memory buffer 
** that SQLite will use for all of its dynamic memory allocation needs
** beyond those provided for by [SQLITE_CONFIG_SCRATCH] and
** [SQLITE_CONFIG_PAGECACHE].
** ^The SQLITE_CONFIG_HEAP option is only available if SQLite is compiled
[src/sqlite3.h lines 3369-3383 → lines 3394-3409; one line changed, one line added]

-** [sqlite3_step(S)] but has not run to completion and/or has not 
+** [sqlite3_step(S)] but has neither run to completion (returned
+** [SQLITE_DONE] from [sqlite3_step(S)]) nor

New text of the region:
/*
** CAPI3REF: Determine If A Prepared Statement Has Been Reset
** METHOD: sqlite3_stmt
**
** ^The sqlite3_stmt_busy(S) interface returns true (non-zero) if the
** [prepared statement] S has been stepped at least once using 
** [sqlite3_step(S)] but has neither run to completion (returned
** [SQLITE_DONE] from [sqlite3_step(S)]) nor
** been reset using [sqlite3_reset(S)].  ^The sqlite3_stmt_busy(S)
** interface returns false if S is a NULL pointer.  If S is not a 
** NULL pointer and is not a pointer to a valid [prepared statement]
** object, then the behavior is undefined and probably undesirable.
**
** This interface can be used in combination [sqlite3_next_stmt()]
** to locate all prepared statements associated with a database 
[src/sqlite3.h lines 3622-3636 → lines 3648-3662; one line changed]

-** [sqlite3_bind_parameter_index()].
+** [sqlite3_bind_parameter_name()].

New text of the region:
** parameter to [sqlite3_bind_blob|sqlite3_bind()].  ^A zero
** is returned if no matching parameter is found.  ^The parameter
** name must be given in UTF-8 even if the original statement
** was prepared from UTF-16 text using [sqlite3_prepare16_v2()].
**
** See also: [sqlite3_bind_blob|sqlite3_bind()],
** [sqlite3_bind_parameter_count()], and
** [sqlite3_bind_parameter_name()].
*/
SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName);

/*
** CAPI3REF: Reset All Bindings On A Prepared Statement
** METHOD: sqlite3_stmt
**
[src/sqlite3.h lines 4351-4380 → lines 4377-4422; sixteen lines added — the new sqlite3_value_subtype() documentation and declaration — and two lines changed]

-SQLITE_API SQLITE_EXPERIMENTAL sqlite3_value *SQLITE_STDCALL sqlite3_value_dup(const sqlite3_value*);
-SQLITE_API SQLITE_EXPERIMENTAL void SQLITE_STDCALL sqlite3_value_free(sqlite3_value*);
+SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_value_dup(const sqlite3_value*);
+SQLITE_API void SQLITE_STDCALL sqlite3_value_free(sqlite3_value*);

New text of the region:
SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_value_text(sqlite3_value*);
SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16(sqlite3_value*);
SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16le(sqlite3_value*);
SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16be(sqlite3_value*);
SQLITE_API int SQLITE_STDCALL sqlite3_value_type(sqlite3_value*);
SQLITE_API int SQLITE_STDCALL sqlite3_value_numeric_type(sqlite3_value*);

/*
** CAPI3REF: Finding The Subtype Of SQL Values
** METHOD: sqlite3_value
**
** The sqlite3_value_subtype(V) function returns the subtype for
** an [application-defined SQL function] argument V.  The subtype
** information can be used to pass a limited amount of context from
** one SQL function to another.  Use the [sqlite3_result_subtype()]
** routine to set the subtype for the return value of an SQL function.
**
** SQLite makes no use of subtype itself.  It merely passes the subtype
** from the result of one [application-defined SQL function] into the
** input of another.
*/
SQLITE_API unsigned int SQLITE_STDCALL sqlite3_value_subtype(sqlite3_value*);

/*
** CAPI3REF: Copy And Free SQL Values
** METHOD: sqlite3_value
**
** ^The sqlite3_value_dup(V) interface makes a copy of the [sqlite3_value]
** object D and returns a pointer to that copy.  ^The [sqlite3_value] returned
** is a [protected sqlite3_value] object even if the input is not.
** ^The sqlite3_value_dup(V) interface returns NULL if V is NULL or if a
** memory allocation fails.
**
** ^The sqlite3_value_free(V) interface frees an [sqlite3_value] object
** previously obtained from [sqlite3_value_dup()].  ^If V is a NULL pointer
** then sqlite3_value_free(V) is a harmless no-op.
*/
SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_value_dup(const sqlite3_value*);
SQLITE_API void SQLITE_STDCALL sqlite3_value_free(sqlite3_value*);
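A hedged sketch (not part of the diff) of the dup/free pairing inside a hypothetical aggregate that remembers the first value it sees.

    #include <sqlite3.h>

    typedef struct FirstCtx FirstCtx;
    struct FirstCtx { sqlite3_value *pFirst; };   /* per-aggregate state */

    /* xStep: keep a protected copy of the first value seen. */
    static void firstStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      FirstCtx *p = (FirstCtx*)sqlite3_aggregate_context(ctx, sizeof(*p));
      (void)argc;
      if( p && p->pFirst==0 ){
        p->pFirst = sqlite3_value_dup(argv[0]);   /* may return NULL on OOM */
      }
    }

    /* xFinal: return the saved value and release the copy. */
    static void firstFinal(sqlite3_context *ctx){
      FirstCtx *p = (FirstCtx*)sqlite3_aggregate_context(ctx, 0);
      if( p && p->pFirst ){
        sqlite3_result_value(ctx, p->pFirst);     /* makes its own copy */
        sqlite3_value_free(p->pFirst);
        p->pFirst = 0;
      }
    }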

/*
** CAPI3REF: Obtain Aggregate Function Context
** METHOD: sqlite3_context
**
** Implementations of aggregate SQL functions use this
** routine to allocate memory for storing their state.
[src/sqlite3.h lines 4531-4547 → lines 4573-4589; three lines changed]

-** ^The sqlite3_result_zeroblob() and zeroblob64() interfaces set the result 
-** of the application-defined function to be a BLOB containing all zero
-** bytes and N bytes in size, where N is the value of the 2nd parameter.
+** ^The sqlite3_result_zeroblob(C,N) and sqlite3_result_zeroblob64(C,N)
+** interfaces set the result of the application-defined function to be
+** a BLOB containing all zero bytes and N bytes in size.

New text of the region:
** Refer to the [SQL parameter] documentation for additional information.
**
** ^The sqlite3_result_blob() interface sets the result from
** an application-defined function to be the BLOB whose content is pointed
** to by the second parameter and which is N bytes long where N is the
** third parameter.
**
** ^The sqlite3_result_zeroblob(C,N) and sqlite3_result_zeroblob64(C,N)
** interfaces set the result of the application-defined function to be
** a BLOB containing all zero bytes and N bytes in size.
**
** ^The sqlite3_result_double() interface sets the result from
** an application-defined function to be a floating point value specified
** by its 2nd argument.
**
** ^The sqlite3_result_error() and sqlite3_result_error16() functions
** cause the implemented SQL function to throw an exception.
[src/sqlite3.h lines 4650-4663 → lines 4692-4720; fifteen lines added — the new sqlite3_result_subtype() documentation and declaration shown below]

New text of the region:
SQLITE_API void SQLITE_STDCALL sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
SQLITE_API void SQLITE_STDCALL sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*));
SQLITE_API void SQLITE_STDCALL sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*));
SQLITE_API void SQLITE_STDCALL sqlite3_result_value(sqlite3_context*, sqlite3_value*);
SQLITE_API void SQLITE_STDCALL sqlite3_result_zeroblob(sqlite3_context*, int n);
SQLITE_API int SQLITE_STDCALL sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n);


/*
** CAPI3REF: Setting The Subtype Of An SQL Function
** METHOD: sqlite3_context
**
** The sqlite3_result_subtype(C,T) function causes the subtype of
** the result from the [application-defined SQL function] with 
** [sqlite3_context] C to be the value T.  Only the lower 8 bits 
** of the subtype T are preserved in current versions of SQLite;
** higher order bits are discarded.
** The number of subtype bytes preserved by SQLite might increase
** in future releases of SQLite.
*/
SQLITE_API void SQLITE_STDCALL sqlite3_result_subtype(sqlite3_context*,unsigned int);
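For illustration only (not part of the diff): a sketch of passing a subtype between two application-defined SQL functions; the function names and the subtype value 42 are arbitrary.

    #include <sqlite3.h>

    /* tagged(x): return x unchanged but mark the result with subtype 42. */
    static void taggedFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      (void)argc;
      sqlite3_result_value(ctx, argv[0]);
      sqlite3_result_subtype(ctx, 42);      /* only the low 8 bits are kept */
    }

    /* is_tagged(x): return 1 if the argument carries subtype 42, else 0. */
    static void isTaggedFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      (void)argc;
      sqlite3_result_int(ctx, sqlite3_value_subtype(argv[0])==42);
    }

    /* Registration, e.g. right after sqlite3_open():
    **   sqlite3_create_function(db, "tagged", 1, SQLITE_UTF8, 0, taggedFunc, 0, 0);
    **   sqlite3_create_function(db, "is_tagged", 1, SQLITE_UTF8, 0, isTaggedFunc, 0, 0);
    ** and then:  SELECT is_tagged(tagged(123));   -- returns 1
    */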

/*
** CAPI3REF: Define New Collating Sequences
** METHOD: sqlite3
**
** ^These functions add, remove, or modify a [collation] associated
** with the [database connection] specified as the first argument.
**
[src/sqlite3.h lines 5569-5582 → lines 5626-5650; eleven lines added — the new paragraph describing the colUsed field shown below]

New text of the region:
** and makes other simplifications to the WHERE clause in an attempt to
** get as many WHERE clause terms into the form shown above as possible.
** ^The aConstraint[] array only reports WHERE clause terms that are
** relevant to the particular virtual table being queried.
**
** ^Information about the ORDER BY clause is stored in aOrderBy[].
** ^Each term of aOrderBy records a column of the ORDER BY clause.
**
** The colUsed field indicates which columns of the virtual table may be
** required by the current scan. Virtual table columns are numbered from
** zero in the order in which they appear within the CREATE TABLE statement
** passed to sqlite3_declare_vtab(). For the first 63 columns (columns 0-62),
** the corresponding bit is set within the colUsed mask if the column may be
** required by SQLite. If the table has at least 64 columns and any column
** to the right of the first 63 is required, then bit 63 of colUsed is also
** set. In other words, column iCol may be required if the expression
** (colUsed & ((sqlite3_uint64)1 << (iCol>=63 ? 63 : iCol))) evaluates to 
** non-zero.
**
** The [xBestIndex] method must fill aConstraintUsage[] with information
** about what parameters to pass to xFilter.  ^If argvIndex>0 then
** the right-hand side of the corresponding aConstraint[] is evaluated
** and becomes the argvIndex-th entry in argv.  ^(If aConstraintUsage[].omit
** is true, then the constraint is assumed to be fully handled by the
** virtual table and is not checked again by SQLite.)^
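An illustrative fragment (not from the diff) showing how an xBestIndex() implementation might test the colUsed mask with the exact expression given above; iCol and the surrounding virtual-table code are assumed context.

    #include <sqlite3.h>

    /* Return non-zero if column iCol of the virtual table may be needed
    ** by the statement currently being planned. */
    static int column_is_used(const sqlite3_index_info *pInfo, int iCol){
      return (pInfo->colUsed & ((sqlite3_uint64)1 << (iCol>=63 ? 63 : iCol))) != 0;
    }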
[src/sqlite3.h lines 5594-5621 → lines 5662-5707; fifteen lines added — the xBestIndex idxFlags / SQLITE_INDEX_SCAN_UNIQUE paragraphs shown below — plus the following changes]

-** value greater than or equal to 3008002.
+** value greater than or equal to 3008002. Similarly, the idxFlags field
+** was added for version 3.9.0. It may therefore only be used if
+** sqlite3_libversion_number() returns a value greater than or equal to
+** 3009000.

-     int iColumn;              /* Column on left-hand side of constraint */
+     int iColumn;              /* Column constrained.  -1 for ROWID */

New text of the region:
** strategy. A cost of N indicates that the cost of the strategy is similar
** to a linear scan of an SQLite table with N rows. A cost of log(N) 
** indicates that the expense of the operation is similar to that of a
** binary search on a unique indexed field of an SQLite table with N rows.
**
** ^The estimatedRows value is an estimate of the number of rows that
** will be returned by the strategy.
**
** The xBestIndex method may optionally populate the idxFlags field with a 
** mask of SQLITE_INDEX_SCAN_* flags. Currently there is only one such flag -
** SQLITE_INDEX_SCAN_UNIQUE. If the xBestIndex method sets this flag, SQLite
** assumes that the strategy may visit at most one row. 
**
** Additionally, if xBestIndex sets the SQLITE_INDEX_SCAN_UNIQUE flag, then
** SQLite also assumes that if a call to the xUpdate() method is made as
** part of the same statement to delete or update a virtual table row and the
** implementation returns SQLITE_CONSTRAINT, then there is no need to rollback
** any database changes. In other words, if the xUpdate() returns
** SQLITE_CONSTRAINT, the database contents must be exactly as they were
** before xUpdate was called. By contrast, if SQLITE_INDEX_SCAN_UNIQUE is not
** set and xUpdate returns SQLITE_CONSTRAINT, any database changes made by
** the xUpdate method are automatically rolled back by SQLite.
**
** IMPORTANT: The estimatedRows field was added to the sqlite3_index_info
** structure for SQLite version 3.8.2. If a virtual table extension is
** used with an SQLite version earlier than 3.8.2, the results of attempting 
** to read or write the estimatedRows field are undefined (but are likely 
** to included crashing the application). The estimatedRows field should
** therefore only be used if [sqlite3_libversion_number()] returns a
** value greater than or equal to 3008002. Similarly, the idxFlags field
** was added for version 3.9.0. It may therefore only be used if
** sqlite3_libversion_number() returns a value greater than or equal to
** 3009000.
*/
struct sqlite3_index_info {
  /* Inputs */
  int nConstraint;           /* Number of entries in aConstraint */
  struct sqlite3_index_constraint {
     int iColumn;              /* Column constrained.  -1 for ROWID */
     unsigned char op;         /* Constraint operator */
     unsigned char usable;     /* True if this constraint is usable */
     int iTermOffset;          /* Used internally - xBestIndex should ignore */
  } *aConstraint;            /* Table of WHERE clause constraints */
  int nOrderBy;              /* Number of terms in the ORDER BY clause */
  struct sqlite3_index_orderby {
     int iColumn;              /* Column number */
[src/sqlite3.h lines 5629-5658 → lines 5715-5756]

Added to struct sqlite3_index_info, after estimatedRows:
+  /* Fields below are only available in SQLite 3.9.0 and later */
+  int idxFlags;              /* Mask of SQLITE_INDEX_SCAN_* flags */
+  /* Fields below are only available in SQLite 3.10.0 and later */
+  sqlite3_uint64 colUsed;    /* Input: Mask of columns used by statement */

Added after the struct:
+/*
+** CAPI3REF: Virtual Table Scan Flags
+*/
+#define SQLITE_INDEX_SCAN_UNIQUE      1     /* Scan visits at most 1 row */

The existing SQLITE_INDEX_CONSTRAINT_* defines are re-aligned (whitespace
only) and three new operator codes are added:
+#define SQLITE_INDEX_CONSTRAINT_LIKE   65
+#define SQLITE_INDEX_CONSTRAINT_GLOB   66
+#define SQLITE_INDEX_CONSTRAINT_REGEXP 67

New text of the region:
  int idxNum;                /* Number used to identify the index */
  char *idxStr;              /* String, possibly obtained from sqlite3_malloc */
  int needToFreeIdxStr;      /* Free idxStr using sqlite3_free() if true */
  int orderByConsumed;       /* True if output is already ordered */
  double estimatedCost;           /* Estimated cost of using this index */
  /* Fields below are only available in SQLite 3.8.2 and later */
  sqlite3_int64 estimatedRows;    /* Estimated number of rows returned */
  /* Fields below are only available in SQLite 3.9.0 and later */
  int idxFlags;              /* Mask of SQLITE_INDEX_SCAN_* flags */
  /* Fields below are only available in SQLite 3.10.0 and later */
  sqlite3_uint64 colUsed;    /* Input: Mask of columns used by statement */
};

/*
** CAPI3REF: Virtual Table Scan Flags
*/
#define SQLITE_INDEX_SCAN_UNIQUE      1     /* Scan visits at most 1 row */

/*
** CAPI3REF: Virtual Table Constraint Operator Codes
**
** These macros defined the allowed values for the
** [sqlite3_index_info].aConstraint[].op field.  Each value represents
** an operator that is part of a constraint term in the wHERE clause of
** a query that uses a [virtual table].
*/
#define SQLITE_INDEX_CONSTRAINT_EQ      2
#define SQLITE_INDEX_CONSTRAINT_GT      4
#define SQLITE_INDEX_CONSTRAINT_LE      8
#define SQLITE_INDEX_CONSTRAINT_LT     16
#define SQLITE_INDEX_CONSTRAINT_GE     32
#define SQLITE_INDEX_CONSTRAINT_MATCH  64
#define SQLITE_INDEX_CONSTRAINT_LIKE   65
#define SQLITE_INDEX_CONSTRAINT_GLOB   66
#define SQLITE_INDEX_CONSTRAINT_REGEXP 67

/*
** CAPI3REF: Register A Virtual Table Implementation
** METHOD: sqlite3
**
** ^These routines are used to register a new [virtual table module] name.
** ^Module names must be registered before
[src/sqlite3.h lines 6088-6101 → lines 6186-6202; three lines added]

+** <li>  SQLITE_MUTEX_STATIC_VFS1
+** <li>  SQLITE_MUTEX_STATIC_VFS2
+** <li>  SQLITE_MUTEX_STATIC_VFS3

New text of the region:
** <li>  SQLITE_MUTEX_STATIC_OPEN
** <li>  SQLITE_MUTEX_STATIC_PRNG
** <li>  SQLITE_MUTEX_STATIC_LRU
** <li>  SQLITE_MUTEX_STATIC_PMEM
** <li>  SQLITE_MUTEX_STATIC_APP1
** <li>  SQLITE_MUTEX_STATIC_APP2
** <li>  SQLITE_MUTEX_STATIC_APP3
** <li>  SQLITE_MUTEX_STATIC_VFS1
** <li>  SQLITE_MUTEX_STATIC_VFS2
** <li>  SQLITE_MUTEX_STATIC_VFS3
** </ul>
**
** ^The first two constants (SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE)
** cause sqlite3_mutex_alloc() to create
** a new mutex.  ^The new mutex is recursive when SQLITE_MUTEX_RECURSIVE
** is used but not necessarily so when SQLITE_MUTEX_FAST is used.
** The mutex implementation does not need to make a distinction
[src/sqlite3.h lines 6505-6519 → lines 6606-6621; one line changed, one line added]

-** <dd>This parameter records the deepest parser stack.  It is only
+** <dd>The *pHighwater parameter records the deepest parser stack. 
+** The *pCurrent value is undefined.  The *pHighwater value is only

New text of the region:
** [[SQLITE_STATUS_SCRATCH_SIZE]] ^(<dt>SQLITE_STATUS_SCRATCH_SIZE</dt>
** <dd>This parameter records the largest memory allocation request
** handed to [scratch memory allocator].  Only the value returned in the
** *pHighwater parameter to [sqlite3_status()] is of interest.  
** The value written into the *pCurrent parameter is undefined.</dd>)^
**
** [[SQLITE_STATUS_PARSER_STACK]] ^(<dt>SQLITE_STATUS_PARSER_STACK</dt>
** <dd>The *pHighwater parameter records the deepest parser stack. 
** The *pCurrent value is undefined.  The *pHighwater value is only
** meaningful if SQLite is compiled with [YYTRACKMAXSTACKDEPTH].</dd>)^
** </dl>
**
** New status parameters may be added from time to time.
*/
#define SQLITE_STATUS_MEMORY_USED          0
#define SQLITE_STATUS_PAGECACHE_USED       1
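Illustrative sketch (not part of the diff) of reading one of these counters; the parameter chosen is just an example.

    #include <stdio.h>
    #include <sqlite3.h>

    /* Print current and high-water heap usage without resetting the high-water mark. */
    static void show_memory_used(void){
      int cur = 0, hi = 0;
      if( sqlite3_status(SQLITE_STATUS_MEMORY_USED, &cur, &hi, 0)==SQLITE_OK ){
        printf("memory used: %d bytes now, %d bytes at peak\n", cur, hi);
      }
    }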
[src/sqlite3.h lines 7291-7315 → lines 7393-7442]

The sqlite3_strglob() description is reworded (old wording below), a
"See also: [sqlite3_strlike()]" cross-reference is added, and the new
twenty-three-line sqlite3_strlike() section is added; the new wording
appears in the text below.

-** ^The [sqlite3_strglob(P,X)] interface returns zero if string X matches
-** the glob pattern P, and it returns non-zero if string X does not match
-** the glob pattern P.  ^The definition of glob pattern matching used in
-** SQL dialect used by SQLite.  ^The sqlite3_strglob(P,X) function is case
-** sensitive.

New text of the region:
*/
SQLITE_API int SQLITE_STDCALL sqlite3_stricmp(const char *, const char *);
SQLITE_API int SQLITE_STDCALL sqlite3_strnicmp(const char *, const char *, int);

/*
** CAPI3REF: String Globbing
*
** ^The [sqlite3_strglob(P,X)] interface returns zero if and only if
** string X matches the [GLOB] pattern P.
** ^The definition of [GLOB] pattern matching used in
** [sqlite3_strglob(P,X)] is the same as for the "X GLOB P" operator in the
** SQL dialect understood by SQLite.  ^The [sqlite3_strglob(P,X)] function
** is case sensitive.
**
** Note that this routine returns zero on a match and non-zero if the strings
** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()].
**
** See also: [sqlite3_strlike()].
*/
SQLITE_API int SQLITE_STDCALL sqlite3_strglob(const char *zGlob, const char *zStr);

/*
** CAPI3REF: String LIKE Matching
*
** ^The [sqlite3_strlike(P,X,E)] interface returns zero if and only if
** string X matches the [LIKE] pattern P with escape character E.
** ^The definition of [LIKE] pattern matching used in
** [sqlite3_strlike(P,X,E)] is the same as for the "X LIKE P ESCAPE E"
** operator in the SQL dialect understood by SQLite.  ^For "X LIKE P" without
** the ESCAPE clause, set the E parameter of [sqlite3_strlike(P,X,E)] to 0.
** ^As with the LIKE operator, the [sqlite3_strlike(P,X,E)] function is case
** insensitive - equivalent upper and lower case ASCII characters match
** one another.
**
** ^The [sqlite3_strlike(P,X,E)] function matches Unicode characters, though
** only ASCII characters are case folded.
**
** Note that this routine returns zero on a match and non-zero if the strings
** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()].
**
** See also: [sqlite3_strglob()].
*/
SQLITE_API int SQLITE_STDCALL sqlite3_strlike(const char *zGlob, const char *zStr, unsigned int cEsc);
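For illustration only (not part of the diff): both helpers follow the zero-means-match convention noted above.

    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      /* sqlite3_strglob() is case sensitive; sqlite3_strlike() is not. */
      printf("%d\n", sqlite3_strglob("*.c", "shell.c")==0);      /* 1: match    */
      printf("%d\n", sqlite3_strglob("*.C", "shell.c")==0);      /* 0: no match */
      printf("%d\n", sqlite3_strlike("%.C", "shell.c", 0)==0);   /* 1: match    */
      return 0;
    }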

/*
** CAPI3REF: Error Logging Interface
**
** ^The [sqlite3_log()] interface writes a message into the [error log]
** established by the [SQLITE_CONFIG_LOG] option to [sqlite3_config()].
** ^If logging is enabled, the zFormat string and subsequent arguments are
[src/sqlite3.h lines 7723-7736 → lines 7850-7986; the new sqlite3_db_cacheflush() and sqlite3_snapshot documentation and declarations (roughly 120 lines) are added, as shown below]

The unchanged trailing context of the hunk, which falls beyond the point
where this excerpt is cut off, is:

 /*
 ** Undo the hack that converts floating point types to integer for
 ** builds on processors without floating point support.
 */
 #ifdef SQLITE_OMIT_FLOATING_POINT
 # undef double

New text of the region (cut off at the end of this excerpt):
** ^Zero all [sqlite3_stmt_scanstatus()] related event counters.
**
** This API is only available if the library is built with pre-processor
** symbol [SQLITE_ENABLE_STMT_SCANSTATUS] defined.
*/
SQLITE_API void SQLITE_STDCALL sqlite3_stmt_scanstatus_reset(sqlite3_stmt*);

/*
** CAPI3REF: Flush caches to disk mid-transaction
**
** ^If a write-transaction is open on [database connection] D when the
** [sqlite3_db_cacheflush(D)] interface invoked, any dirty
** pages in the pager-cache that are not currently in use are written out 
** to disk. A dirty page may be in use if a database cursor created by an
** active SQL statement is reading from it, or if it is page 1 of a database
** file (page 1 is always "in use").  ^The [sqlite3_db_cacheflush(D)]
** interface flushes caches for all schemas - "main", "temp", and
** any [attached] databases.
**
** ^If this function needs to obtain extra database locks before dirty pages 
** can be flushed to disk, it does so. ^If those locks cannot be obtained 
** immediately and there is a busy-handler callback configured, it is invoked
** in the usual manner. ^If the required lock still cannot be obtained, then
** the database is skipped and an attempt made to flush any dirty pages
** belonging to the next (if any) database. ^If any databases are skipped
** because locks cannot be obtained, but no other error occurs, this
** function returns SQLITE_BUSY.
**
** ^If any other error occurs while flushing dirty pages to disk (for
** example an IO error or out-of-memory condition), then processing is
** abandoned and an SQLite [error code] is returned to the caller immediately.
**
** ^Otherwise, if no error occurs, [sqlite3_db_cacheflush()] returns SQLITE_OK.
**
** ^This function does not set the database handle error code or message
** returned by the [sqlite3_errcode()] and [sqlite3_errmsg()] functions.
*/
SQLITE_API int SQLITE_STDCALL sqlite3_db_cacheflush(sqlite3*);
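
/* Editor's sketch (not part of sqlite3.h or this check-in): flush dirty
** pager-cache pages in the middle of a long-running write transaction.
*/
static int flush_mid_transaction(sqlite3 *db){
  int rc = sqlite3_db_cacheflush(db);
  if( rc==SQLITE_BUSY ){
    /* One or more databases were skipped because the required locks could
    ** not be obtained; the write transaction itself remains intact. */
  }
  return rc;
}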

/*
** CAPI3REF: Database Snapshot
** KEYWORDS: {snapshot}
** EXPERIMENTAL
**
** An instance of the snapshot object records the state of a [WAL mode]
** database for some specific point in history.
**
** In [WAL mode], multiple [database connections] that are open on the
** same database file can each be reading a different historical version
** of the database file.  When a [database connection] begins a read
** transaction, that connection sees an unchanging copy of the database
** as it existed for the point in time when the transaction first started.
** Subsequent changes to the database from other connections are not seen
** by the reader until a new read transaction is started.
**
** The sqlite3_snapshot object records state information about an historical
** version of the database file so that it is possible to later open a new read
** transaction that sees that historical version of the database rather than
** the most recent version.
**
** The constructor for this object is [sqlite3_snapshot_get()].  The
** [sqlite3_snapshot_open()] method causes a fresh read transaction to refer
** to an historical snapshot (if possible).  The destructor for 
** sqlite3_snapshot objects is [sqlite3_snapshot_free()].
*/
typedef struct sqlite3_snapshot sqlite3_snapshot;

/*
** CAPI3REF: Record A Database Snapshot
** EXPERIMENTAL
**
** ^The [sqlite3_snapshot_get(D,S,P)] interface attempts to make a
** new [sqlite3_snapshot] object that records the current state of
** schema S in database connection D.  ^On success, the
** [sqlite3_snapshot_get(D,S,P)] interface writes a pointer to the newly
** created [sqlite3_snapshot] object into *P and returns SQLITE_OK.
** ^If schema S of [database connection] D is not a [WAL mode] database
** that is in a read transaction, then [sqlite3_snapshot_get(D,S,P)]
** leaves the *P value unchanged and returns an appropriate [error code].
**
** The [sqlite3_snapshot] object returned from a successful call to
** [sqlite3_snapshot_get()] must be freed using [sqlite3_snapshot_free()]
** to avoid a memory leak.
**
** The [sqlite3_snapshot_get()] interface is only available when the
** SQLITE_ENABLE_SNAPSHOT compile-time option is used.
*/
SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_get(
  sqlite3 *db,
  const char *zSchema,
  sqlite3_snapshot **ppSnapshot
);

/*
** CAPI3REF: Start a read transaction on an historical snapshot
** EXPERIMENTAL
**
** ^The [sqlite3_snapshot_open(D,S,P)] interface attempts to move the
** read transaction that is currently open on schema S of
** [database connection] D so that it refers to historical [snapshot] P.
** ^The [sqlite3_snapshot_open()] interface returns SQLITE_OK on success
** or an appropriate [error code] if it fails.
**
** ^In order to succeed, a call to [sqlite3_snapshot_open(D,S,P)] must be
** the first operation, apart from other sqlite3_snapshot_open() calls,
** following the [BEGIN] that starts a new read transaction.
** ^A [snapshot] will fail to open if it has been overwritten by a 
** [checkpoint].  
**
** The [sqlite3_snapshot_open()] interface is only available when the
** SQLITE_ENABLE_SNAPSHOT compile-time option is used.
*/
SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_open(
  sqlite3 *db,
  const char *zSchema,
  sqlite3_snapshot *pSnapshot
);

/*
** CAPI3REF: Destroy a snapshot
** EXPERIMENTAL
**
** ^The [sqlite3_snapshot_free(P)] interface destroys [sqlite3_snapshot] P.
** The application must eventually free every [sqlite3_snapshot] object
** using this routine to avoid a memory leak.
**
** The [sqlite3_snapshot_free()] interface is only available when the
** SQLITE_ENABLE_SNAPSHOT compile-time option is used.
*/
SQLITE_API SQLITE_EXPERIMENTAL void SQLITE_STDCALL sqlite3_snapshot_free(sqlite3_snapshot*);
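
/* Editor's sketch (not part of sqlite3.h or this check-in): the expected
** lifecycle of the three experimental snapshot interfaces declared above.
** Assumes a WAL-mode database and a build with SQLITE_ENABLE_SNAPSHOT.
*/
static int reread_snapshot(sqlite3 *db){
  sqlite3_snapshot *pSnap = 0;
  int rc;

  /* A read transaction must be open before sqlite3_snapshot_get() is called */
  rc = sqlite3_exec(db, "BEGIN; SELECT 1 FROM sqlite_master;", 0, 0, 0);
  if( rc==SQLITE_OK ) rc = sqlite3_snapshot_get(db, "main", &pSnap);
  sqlite3_exec(db, "COMMIT", 0, 0, 0);

  if( rc==SQLITE_OK ){
    /* ...later: re-open a read transaction on the recorded snapshot... */
    rc = sqlite3_exec(db, "BEGIN", 0, 0, 0);
    if( rc==SQLITE_OK ) rc = sqlite3_snapshot_open(db, "main", pSnap);
    /* queries run here see the database as of the snapshot */
    sqlite3_exec(db, "COMMIT", 0, 0, 0);
  }
  if( pSnap ) sqlite3_snapshot_free(pSnap);
  return rc;
}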

/*
** Undo the hack that converts floating point types to integer for
** builds on processors without floating point support.
*/
#ifdef SQLITE_OMIT_FLOATING_POINT
# undef double
#ifdef __cplusplus
}  /* end of the 'extern "C"' block */
#endif

#endif  /* ifndef _SQLITE3RTREE_H_ */

/*
** 2014 May 31
**
** The author disclaims copyright to this source code.  In place of
** a legal notice, here is a blessing:
**
**    May you do good and not evil.
**    May you find forgiveness for yourself and forgive others.
**    May you share freely, never taking more than you give.
**
******************************************************************************
**
** Interfaces to extend FTS5. Using the interfaces defined in this file, 
** FTS5 may be extended with:
**
**     * custom tokenizers, and
**     * custom auxiliary functions.
*/


#ifndef _FTS5_H
#define _FTS5_H


#ifdef __cplusplus
extern "C" {
#endif

/*************************************************************************
** CUSTOM AUXILIARY FUNCTIONS
**
** Virtual table implementations may overload SQL functions by implementing
** the sqlite3_module.xFindFunction() method.
*/

typedef struct Fts5ExtensionApi Fts5ExtensionApi;
typedef struct Fts5Context Fts5Context;
typedef struct Fts5PhraseIter Fts5PhraseIter;

typedef void (*fts5_extension_function)(
  const Fts5ExtensionApi *pApi,   /* API offered by current FTS version */
  Fts5Context *pFts,              /* First arg to pass to pApi functions */
  sqlite3_context *pCtx,          /* Context for returning result/error */
  int nVal,                       /* Number of values in apVal[] array */
  sqlite3_value **apVal           /* Array of trailing arguments */
);

struct Fts5PhraseIter {
  const unsigned char *a;
  const unsigned char *b;
};

/*
** EXTENSION API FUNCTIONS
**
** xUserData(pFts):
**   Return a copy of the context pointer the extension function was 
**   registered with.
**
** xColumnTotalSize(pFts, iCol, pnToken):
**   If parameter iCol is less than zero, set output variable *pnToken
**   to the total number of tokens in the FTS5 table. Or, if iCol is
**   non-negative but less than the number of columns in the table, return
**   the total number of tokens in column iCol, considering all rows in 
**   the FTS5 table.
**
**   If parameter iCol is greater than or equal to the number of columns
**   in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
**   an OOM condition or IO error), an appropriate SQLite error code is 
**   returned.
**
** xColumnCount(pFts):
**   Return the number of columns in the table.
**
** xColumnSize(pFts, iCol, pnToken):
**   If parameter iCol is less than zero, set output variable *pnToken
**   to the total number of tokens in the current row. Or, if iCol is
**   non-negative but less than the number of columns in the table, set
**   *pnToken to the number of tokens in column iCol of the current row.
**
**   If parameter iCol is greater than or equal to the number of columns
**   in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
**   an OOM condition or IO error), an appropriate SQLite error code is 
**   returned.
**
**   This function may be quite inefficient if used with an FTS5 table
**   created with the "columnsize=0" option.
**
** xColumnText:
**   This function attempts to retrieve the text of column iCol of the
**   current document. If successful, (*pz) is set to point to a buffer
**   containing the text in utf-8 encoding, (*pn) is set to the size in bytes
**   (not characters) of the buffer and SQLITE_OK is returned. Otherwise,
**   if an error occurs, an SQLite error code is returned and the final values
**   of (*pz) and (*pn) are undefined.
**
** xPhraseCount:
**   Returns the number of phrases in the current query expression.
**
** xPhraseSize:
**   Returns the number of tokens in phrase iPhrase of the query. Phrases
**   are numbered starting from zero.
**
** xInstCount:
**   Set *pnInst to the total number of occurrences of all phrases within
**   the query within the current row. Return SQLITE_OK if successful, or
**   an error code (i.e. SQLITE_NOMEM) if an error occurs.
**
**   This API can be quite slow if used with an FTS5 table created with the
**   "detail=none" or "detail=column" option. If the FTS5 table is created 
**   with either "detail=none" or "detail=column" and "content=" option 
**   (i.e. if it is a contentless table), then this API always returns 0.
**
** xInst:
**   Query for the details of phrase match iIdx within the current row.
**   Phrase matches are numbered starting from zero, so the iIdx argument
**   should be greater than or equal to zero and smaller than the value
**   output by xInstCount().
**
**   Usually, output parameter *piPhrase is set to the phrase number, *piCol
**   to the column in which it occurs and *piOff the token offset of the
**   first token of the phrase. The exception is if the table was created
**   with the offsets=0 option specified. In this case *piOff is always
**   set to -1.
**
**   Returns SQLITE_OK if successful, or an error code (i.e. SQLITE_NOMEM) 
**   if an error occurs.
**
**   This API can be quite slow if used with an FTS5 table created with the
**   "detail=none" or "detail=column" option. 
**
** xRowid:
**   Returns the rowid of the current row.
**
** xTokenize:
**   Tokenize text using the tokenizer belonging to the FTS5 table.
**
** xQueryPhrase(pFts5, iPhrase, pUserData, xCallback):
**   This API function is used to query the FTS table for phrase iPhrase
**   of the current query. Specifically, a query equivalent to:
**
**       ... FROM ftstable WHERE ftstable MATCH $p ORDER BY rowid
**
**   with $p set to a phrase equivalent to the phrase iPhrase of the
**   current query is executed. For each row visited, the callback function
**   passed as the fourth argument is invoked. The context and API objects 
**   passed to the callback function may be used to access the properties of
**   each matched row. Invoking Api.xUserData() returns a copy of the pointer
**   passed as the third argument to pUserData.
**
**   If the callback function returns any value other than SQLITE_OK, the
**   query is abandoned and the xQueryPhrase function returns immediately.
**   If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
**   Otherwise, the error code is propagated upwards.
**
**   If the query runs to completion without incident, SQLITE_OK is returned.
**   Or, if some error occurs before the query completes or is aborted by
**   the callback, an SQLite error code is returned.
**
**
** xSetAuxdata(pFts5, pAux, xDelete)
**
**   Save the pointer passed as the second argument as the extension function's
**   "auxiliary data". The pointer may then be retrieved by the current or any
**   future invocation of the same fts5 extension function made as part of
**   the same MATCH query using the xGetAuxdata() API.
**
**   Each extension function is allocated a single auxiliary data slot for
**   each FTS query (MATCH expression). If the extension function is invoked 
**   more than once for a single FTS query, then all invocations share a 
**   single auxiliary data context.
**
**   If there is already an auxiliary data pointer when this function is
**   invoked, then it is replaced by the new pointer. If an xDelete callback
**   was specified along with the original pointer, it is invoked at this
**   point.
**
**   The xDelete callback, if one is specified, is also invoked on the
**   auxiliary data pointer after the FTS5 query has finished.
**
**   If an error (e.g. an OOM condition) occurs within this function, then
**   the auxiliary data is set to NULL and an error code is returned. If the
**   xDelete parameter was not NULL, it is invoked on the auxiliary data
**   pointer before returning.
**
**
** xGetAuxdata(pFts5, bClear)
**
**   Returns the current auxiliary data pointer for the fts5 extension 
**   function. See the xSetAuxdata() method for details.
**
**   If the bClear argument is non-zero, then the auxiliary data is cleared
**   (set to NULL) before this function returns. In this case the xDelete
**   callback, if any, is not invoked.
**
**
** xRowCount(pFts5, pnRow)
**
**   This function is used to retrieve the total number of rows in the table.
**   In other words, the same value that would be returned by:
**
**        SELECT count(*) FROM ftstable;
**
** xPhraseFirst()
**   This function is used, along with type Fts5PhraseIter and the xPhraseNext
**   method, to iterate through all instances of a single query phrase within
**   the current row. This is the same information as is accessible via the
**   xInstCount/xInst APIs. While the xInstCount/xInst APIs are more convenient
**   to use, this API may be faster under some circumstances. To iterate 
**   through instances of phrase iPhrase, use the following code:
**
**       Fts5PhraseIter iter;
**       int iCol, iOff;
**       for(pApi->xPhraseFirst(pFts, iPhrase, &iter, &iCol, &iOff);
**           iCol>=0;
**           pApi->xPhraseNext(pFts, &iter, &iCol, &iOff)
**       ){
**         // An instance of phrase iPhrase at offset iOff of column iCol
**       }
**
**   The Fts5PhraseIter structure is defined above. Applications should not
**   modify this structure directly - it should only be used as shown above
**   with the xPhraseFirst() and xPhraseNext() API methods (and by
**   xPhraseFirstColumn() and xPhraseNextColumn() as illustrated below).
**
**   This API can be quite slow if used with an FTS5 table created with the
**   "detail=none" or "detail=column" option. If the FTS5 table is created 
**   with either "detail=none" or "detail=column" and "content=" option 
**   (i.e. if it is a contentless table), then this API always iterates
**   through an empty set (all calls to xPhraseFirst() set iCol to -1).
**
** xPhraseNext()
**   See xPhraseFirst above.
**
** xPhraseFirstColumn()
**   This function and xPhraseNextColumn() are similar to the xPhraseFirst()
**   and xPhraseNext() APIs described above. The difference is that instead
**   of iterating through all instances of a phrase in the current row, these
**   APIs are used to iterate through the set of columns in the current row
**   that contain one or more instances of a specified phrase. For example:
**
**       Fts5PhraseIter iter;
**       int iCol;
**       for(pApi->xPhraseFirstColumn(pFts, iPhrase, &iter, &iCol);
**           iCol>=0;
**           pApi->xPhraseNextColumn(pFts, &iter, &iCol)
**       ){
**         // Column iCol contains at least one instance of phrase iPhrase
**       }
**
**   This API can be quite slow if used with an FTS5 table created with the
**   "detail=none" option. If the FTS5 table is created with either 
**   "detail=none" "content=" option (i.e. if it is a contentless table), 
**   then this API always iterates through an empty set (all calls to 
**   xPhraseFirstColumn() set iCol to -1).
**
**   The information accessed using this API and its companion
**   xPhraseFirstColumn() may also be obtained using xPhraseFirst/xPhraseNext
**   (or xInst/xInstCount). The chief advantage of this API is that it is
**   significantly more efficient than those alternatives when used with
**   "detail=column" tables.  
**
** xPhraseNextColumn()
**   See xPhraseFirstColumn above.
*/
struct Fts5ExtensionApi {
  int iVersion;                   /* Currently always set to 3 */

  void *(*xUserData)(Fts5Context*);

  int (*xColumnCount)(Fts5Context*);
  int (*xRowCount)(Fts5Context*, sqlite3_int64 *pnRow);
  int (*xColumnTotalSize)(Fts5Context*, int iCol, sqlite3_int64 *pnToken);

  int (*xTokenize)(Fts5Context*, 
    const char *pText, int nText, /* Text to tokenize */
    void *pCtx,                   /* Context passed to xToken() */
    int (*xToken)(void*, int, const char*, int, int, int)       /* Callback */
  );

  int (*xPhraseCount)(Fts5Context*);
  int (*xPhraseSize)(Fts5Context*, int iPhrase);

  int (*xInstCount)(Fts5Context*, int *pnInst);
  int (*xInst)(Fts5Context*, int iIdx, int *piPhrase, int *piCol, int *piOff);

  sqlite3_int64 (*xRowid)(Fts5Context*);
  int (*xColumnText)(Fts5Context*, int iCol, const char **pz, int *pn);
  int (*xColumnSize)(Fts5Context*, int iCol, int *pnToken);

  int (*xQueryPhrase)(Fts5Context*, int iPhrase, void *pUserData,
    int(*)(const Fts5ExtensionApi*,Fts5Context*,void*)
  );
  int (*xSetAuxdata)(Fts5Context*, void *pAux, void(*xDelete)(void*));
  void *(*xGetAuxdata)(Fts5Context*, int bClear);

  int (*xPhraseFirst)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*, int*);
  void (*xPhraseNext)(Fts5Context*, Fts5PhraseIter*, int *piCol, int *piOff);

  int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*);
  void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol);
};
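
/* Editor's sketch (not part of this check-in): a hypothetical auxiliary
** function that returns the number of phrase matches in the current row
** via xInstCount().  It would be registered through fts5_api.xCreateFunction()
** under an illustrative name such as "matchcount".
*/
static void matchCountFunc(
  const Fts5ExtensionApi *pApi,   /* API offered by current FTS version */
  Fts5Context *pFts,              /* First arg to pass to pApi functions */
  sqlite3_context *pCtx,          /* Context for returning result/error */
  int nVal,                       /* Number of values in apVal[] array */
  sqlite3_value **apVal           /* Array of trailing arguments */
){
  int nInst = 0;
  int rc = pApi->xInstCount(pFts, &nInst);
  if( rc==SQLITE_OK ){
    sqlite3_result_int(pCtx, nInst);
  }else{
    sqlite3_result_error_code(pCtx, rc);
  }
}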

/* 
** CUSTOM AUXILIARY FUNCTIONS
*************************************************************************/

/*************************************************************************
** CUSTOM TOKENIZERS
**
** Applications may also register custom tokenizer types. A tokenizer 
** is registered by providing fts5 with a populated instance of the 
** following structure. All structure methods must be defined; setting
** any member of the fts5_tokenizer struct to NULL leads to undefined
** behaviour. The structure methods are expected to function as follows:
**
** xCreate:
**   This function is used to allocate and initialize a tokenizer instance.
**   A tokenizer instance is required to actually tokenize text.
**
**   The first argument passed to this function is a copy of the (void*)
**   pointer provided by the application when the fts5_tokenizer object
**   was registered with FTS5 (the third argument to xCreateTokenizer()). 
**   The second and third arguments are an array of nul-terminated strings
**   containing the tokenizer arguments, if any, specified following the
**   tokenizer name as part of the CREATE VIRTUAL TABLE statement used
**   to create the FTS5 table.
**
**   The final argument is an output variable. If successful, (*ppOut) 
**   should be set to point to the new tokenizer handle and SQLITE_OK
**   returned. If an error occurs, some value other than SQLITE_OK should
**   be returned. In this case, fts5 assumes that the final value of *ppOut 
**   is undefined.
**
** xDelete:
**   This function is invoked to delete a tokenizer handle previously
**   allocated using xCreate(). Fts5 guarantees that this function will
**   be invoked exactly once for each successful call to xCreate().
**
** xTokenize:
**   This function is expected to tokenize the nText byte string indicated 
**   by argument pText. pText may or may not be nul-terminated. The first
**   argument passed to this function is a pointer to an Fts5Tokenizer object
**   returned by an earlier call to xCreate().
**
**   The second argument indicates the reason that FTS5 is requesting
**   tokenization of the supplied text. This is always one of the following
**   four values:
**
**   <ul><li> <b>FTS5_TOKENIZE_DOCUMENT</b> - A document is being inserted into
**            or removed from the FTS table. The tokenizer is being invoked to
**            determine the set of tokens to add to (or delete from) the
**            FTS index.
**
**       <li> <b>FTS5_TOKENIZE_QUERY</b> - A MATCH query is being executed 
**            against the FTS index. The tokenizer is being called to tokenize 
**            a bareword or quoted string specified as part of the query.
**
**       <li> <b>(FTS5_TOKENIZE_QUERY | FTS5_TOKENIZE_PREFIX)</b> - Same as
**            FTS5_TOKENIZE_QUERY, except that the bareword or quoted string is
**            followed by a "*" character, indicating that the last token
**            returned by the tokenizer will be treated as a token prefix.
**
**       <li> <b>FTS5_TOKENIZE_AUX</b> - The tokenizer is being invoked to 
**            satisfy an fts5_api.xTokenize() request made by an auxiliary
**            function. Or an fts5_api.xColumnSize() request made by the same
**            on a columnsize=0 database.  
**   </ul>
**
**   For each token in the input string, the supplied callback xToken() must
**   be invoked. The first argument to it should be a copy of the pointer
**   passed as the second argument to xTokenize(). The third and fourth
**   arguments are a pointer to a buffer containing the token text, and the
**   size of the token in bytes. The 4th and 5th arguments are the byte offsets
**   of the first byte of and first byte immediately following the text from
**   which the token is derived within the input.
**
**   The second argument passed to the xToken() callback ("tflags") should
**   normally be set to 0. The exception is if the tokenizer supports 
**   synonyms. In this case see the discussion below for details.
**
**   FTS5 assumes the xToken() callback is invoked for each token in the 
**   order that they occur within the input text.
**
**   If an xToken() callback returns any value other than SQLITE_OK, then
**   the tokenization should be abandoned and the xTokenize() method should
**   immediately return a copy of the xToken() return value. Or, if the
**   input buffer is exhausted, xTokenize() should return SQLITE_OK. Finally,
**   if an error occurs with the xTokenize() implementation itself, it
**   may abandon the tokenization and return any error code other than
**   SQLITE_OK or SQLITE_DONE.
**
** SYNONYM SUPPORT
**
**   Custom tokenizers may also support synonyms. Consider a case in which a
**   user wishes to query for a phrase such as "first place". Using the 
**   built-in tokenizers, the FTS5 query 'first + place' will match instances
**   of "first place" within the document set, but not alternative forms
**   such as "1st place". In some applications, it would be better to match
**   all instances of "first place" or "1st place" regardless of which form
**   the user specified in the MATCH query text.
**
**   There are several ways to approach this in FTS5:
**
**   <ol><li> By mapping all synonyms to a single token. In the above
**            example, this means that the tokenizer returns the
**            same token for inputs "first" and "1st". Say that token is in
**            fact "first", so that when the user inserts the document "I won
**            1st place" entries are added to the index for tokens "i", "won",
**            "first" and "place". If the user then queries for '1st + place',
**            the tokenizer substitutes "first" for "1st" and the query works
**            as expected.
**
**       <li> By adding multiple synonyms for a single term to the FTS index.
**            In this case, when tokenizing query text, the tokenizer may 
**            provide multiple synonyms for a single term within the document.
**            FTS5 then queries the index for each synonym individually. For
**            example, faced with the query:
**
**   <codeblock>
**     ... MATCH 'first place'</codeblock>
**
**            the tokenizer offers both "1st" and "first" as synonyms for the
**            first token in the MATCH query and FTS5 effectively runs a query 
**            similar to:
**
**   <codeblock>
**     ... MATCH '(first OR 1st) place'</codeblock>
**
**            except that, for the purposes of auxiliary functions, the query
**            still appears to contain just two phrases - "(first OR 1st)" 
**            being treated as a single phrase.
**
**       <li> By adding multiple synonyms for a single term to the FTS index.
**            Using this method, when tokenizing document text, the tokenizer
**            provides multiple synonyms for each token. So that when a 
**            document such as "I won first place" is tokenized, entries are
**            added to the FTS index for "i", "won", "first", "1st" and
**            "place".
**
**            This way, even if the tokenizer does not provide synonyms
**            when tokenizing query text (it should not - to do so would be
**            inefficient), it doesn't matter if the user queries for 
**            'first + place' or '1st + place', as there are entries in the
**            FTS index corresponding to both forms of the first token.
**   </ol>
**
**   Whether it is parsing document or query text, any call to xToken that
**   specifies a <i>tflags</i> argument with the FTS5_TOKEN_COLOCATED bit
**   is considered to supply a synonym for the previous token. For example,
**   when parsing the document "I won first place", a tokenizer that supports
**   synonyms would call xToken() 5 times, as follows:
**
**   <codeblock>
**       xToken(pCtx, 0, "i",                      1,  0,  1);
**       xToken(pCtx, 0, "won",                    3,  2,  5);
**       xToken(pCtx, 0, "first",                  5,  6, 11);
**       xToken(pCtx, FTS5_TOKEN_COLOCATED, "1st", 3,  6, 11);
**       xToken(pCtx, 0, "place",                  5, 12, 17);
**</codeblock>
**
**   It is an error to specify the FTS5_TOKEN_COLOCATED flag the first time
**   xToken() is called. Multiple synonyms may be specified for a single token
**   by making multiple calls to xToken(FTS5_TOKEN_COLOCATED) in sequence. 
**   There is no limit to the number of synonyms that may be provided for a
**   single token.
**
**   In many cases, method (1) above is the best approach. It does not add 
**   extra data to the FTS index or require FTS5 to query for multiple terms,
**   so it is efficient in terms of disk space and query speed. However, it
**   does not support prefix queries very well. If, as suggested above, the
**   token "first" is substituted for "1st" by the tokenizer, then the query:
**
**   <codeblock>
**     ... MATCH '1s*'</codeblock>
**
**   will not match documents that contain the token "1st" (as the tokenizer
**   will probably not map "1s" to any prefix of "first").
**
**   For full prefix support, method (3) may be preferred. In this case, 
**   because the index contains entries for both "first" and "1st", prefix
**   queries such as 'fi*' or '1s*' will match correctly. However, because
**   extra entries are added to the FTS index, this method uses more space
**   within the database.
**
**   Method (2) offers a midpoint between (1) and (3). Using this method,
**   a query such as '1s*' will match documents that contain the literal 
**   token "1st", but not "first" (assuming the tokenizer is not able to
**   provide synonyms for prefixes). However, a non-prefix query like '1st'
**   will match against "1st" and "first". This method does not require
**   extra disk space, as no extra entries are added to the FTS index. 
**   On the other hand, it may require more CPU cycles to run MATCH queries,
**   as separate queries of the FTS index are required for each synonym.
**
**   When using methods (2) or (3), it is important that the tokenizer only
**   provide synonyms when tokenizing document text (method (2)) or query
**   text (method (3)), not both. Doing so will not cause any errors, but is
**   inefficient.
*/
typedef struct Fts5Tokenizer Fts5Tokenizer;
typedef struct fts5_tokenizer fts5_tokenizer;
struct fts5_tokenizer {
  int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut);
  void (*xDelete)(Fts5Tokenizer*);
  int (*xTokenize)(Fts5Tokenizer*, 
      void *pCtx,
      int flags,            /* Mask of FTS5_TOKENIZE_* flags */
      const char *pText, int nText, 
      int (*xToken)(
        void *pCtx,         /* Copy of 2nd argument to xTokenize() */
        int tflags,         /* Mask of FTS5_TOKEN_* flags */
        const char *pToken, /* Pointer to buffer containing token */
        int nToken,         /* Size of token in bytes */
        int iStart,         /* Byte offset of token within input text */
        int iEnd            /* Byte offset of end of token within input text */
      )
  );
};

/* Flags that may be passed as the third argument to xTokenize() */
#define FTS5_TOKENIZE_QUERY     0x0001
#define FTS5_TOKENIZE_PREFIX    0x0002
#define FTS5_TOKENIZE_DOCUMENT  0x0004
#define FTS5_TOKENIZE_AUX       0x0008

/* Flags that may be passed by the tokenizer implementation back to FTS5
** as the third argument to the supplied xToken callback. */
#define FTS5_TOKEN_COLOCATED    0x0001      /* Same position as prev. token */
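
/* Editor's sketch (not part of this check-in): a hypothetical tokenizer that
** splits its input on ASCII space characters only, implementing the three
** fts5_tokenizer methods described above.  It would be registered with
** fts5_api.xCreateTokenizer() under an illustrative name such as "space".
*/
typedef struct SpaceTokenizer SpaceTokenizer;
struct SpaceTokenizer { int unused; };

static int spaceCreate(
  void *pCtx, const char **azArg, int nArg, Fts5Tokenizer **ppOut
){
  SpaceTokenizer *p = sqlite3_malloc(sizeof(SpaceTokenizer));
  if( p==0 ) return SQLITE_NOMEM;
  p->unused = 0;
  *ppOut = (Fts5Tokenizer*)p;
  return SQLITE_OK;
}
static void spaceDelete(Fts5Tokenizer *pTok){
  sqlite3_free(pTok);
}
static int spaceTokenize(
  Fts5Tokenizer *pTok, void *pCtx, int flags,
  const char *pText, int nText,
  int (*xToken)(void*, int, const char*, int, int, int)
){
  int i = 0;
  int rc = SQLITE_OK;
  while( i<nText && rc==SQLITE_OK ){
    int iStart;
    while( i<nText && pText[i]==' ' ) i++;    /* skip separator characters */
    iStart = i;
    while( i<nText && pText[i]!=' ' ) i++;    /* consume one token */
    if( i>iStart ){
      rc = xToken(pCtx, 0, &pText[iStart], i-iStart, iStart, i);
    }
  }
  return rc;
}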

/*
** END OF CUSTOM TOKENIZERS
*************************************************************************/

/*************************************************************************
** FTS5 EXTENSION REGISTRATION API
*/
typedef struct fts5_api fts5_api;
struct fts5_api {
  int iVersion;                   /* Currently always set to 2 */

  /* Create a new tokenizer */
  int (*xCreateTokenizer)(
    fts5_api *pApi,
    const char *zName,
    void *pContext,
    fts5_tokenizer *pTokenizer,
    void (*xDestroy)(void*)
  );

  /* Find an existing tokenizer */
  int (*xFindTokenizer)(
    fts5_api *pApi,
    const char *zName,
    void **ppContext,
    fts5_tokenizer *pTokenizer
  );

  /* Create a new auxiliary function */
  int (*xCreateFunction)(
    fts5_api *pApi,
    const char *zName,
    void *pContext,
    fts5_extension_function xFunction,
    void (*xDestroy)(void*)
  );
};
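
/* Editor's sketch (not part of this check-in): one way, as documented for
** FTS5 at the time, to obtain the fts5_api pointer for a database connection
** so that the registration methods above can be called.  (Later SQLite
** releases document a different mechanism based on sqlite3_bind_pointer().)
*/
#include <string.h>   /* memcpy() */
static fts5_api *fts5_api_from_db(sqlite3 *db){
  fts5_api *pRet = 0;
  sqlite3_stmt *pStmt = 0;
  if( SQLITE_OK==sqlite3_prepare(db, "SELECT fts5()", -1, &pStmt, 0)
   && SQLITE_ROW==sqlite3_step(pStmt)
   && sizeof(fts5_api*)==sqlite3_column_bytes(pStmt, 0)
  ){
    memcpy(&pRet, sqlite3_column_blob(pStmt, 0), sizeof(fts5_api*));
  }
  sqlite3_finalize(pStmt);
  return pRet;
}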

/*
** END OF REGISTRATION API
*************************************************************************/

#ifdef __cplusplus
}  /* end of the 'extern "C"' block */
#endif

#endif /* _FTS5_H */


Changes to src/stash.c.
**
**  fossil stash list ?-v|--verbose?
**  fossil stash ls ?-v|--verbose?
**
**     List all change sets currently stashed.  Show information about
**     individual files in each changeset if -v or --verbose is used.
**
**  fossil stash show ?STASHID? ?DIFF-FLAGS?
**
**     Show the content of a stash
**
**  fossil stash pop
**  fossil stash apply ?STASHID?
**
**     Apply STASHID or the most recently created stash to the current
**
**  fossil stash list ?-v|--verbose?
**  fossil stash ls ?-v|--verbose?
**
**     List all change sets currently stashed.  Show information about
**     individual files in each changeset if -v or --verbose is used.
**
**  fossil stash show|cat ?STASHID? ?DIFF-FLAGS?
**
**     Show the content of a stash
**
**  fossil stash pop
**  fossil stash apply ?STASHID?
**
**     Apply STASHID or the most recently created stash to the current
**     directory would be if STASHID were applied.
**
** SUMMARY:
**  fossil stash
**  fossil stash save ?-m|--comment COMMENT? ?FILES...?
**  fossil stash snapshot ?-m|--comment COMMENT? ?FILES...?
**  fossil stash list|ls  ?-v|--verbose? ?-W|--width <num>?
**  fossil stash show ?STASHID? ?DIFF-OPTIONS?
**  fossil stash pop
**  fossil stash apply ?STASHID?
**  fossil stash goto ?STASHID?
**  fossil stash rm|drop ?STASHID? ?-a|--all?
**  fossil stash [g]diff ?STASHID? ?DIFF-OPTIONS?
*/
void stash_cmd(void){
  const char *zDb;
  const char *zCmd;
  int nCmd;
  int stashid = 0;
  undo_capture_command_line();
  db_must_be_within_tree();
  db_open_config(0);
  db_begin_transaction();
  zDb = db_name("localdb");
  db_multi_exec(zStashInit /*works-like:"%w,%w"*/, zDb, zDb);
  if( g.argc<=2 ){
    zCmd = "save";
  }else{
    zCmd = g.argv[2];
**     directory would be if STASHID were applied.
**
** SUMMARY:
**  fossil stash
**  fossil stash save ?-m|--comment COMMENT? ?FILES...?
**  fossil stash snapshot ?-m|--comment COMMENT? ?FILES...?
**  fossil stash list|ls  ?-v|--verbose? ?-W|--width <num>?
**  fossil stash show|cat ?STASHID? ?DIFF-OPTIONS?
**  fossil stash pop
**  fossil stash apply ?STASHID?
**  fossil stash goto ?STASHID?
**  fossil stash rm|drop ?STASHID? ?-a|--all?
**  fossil stash [g]diff ?STASHID? ?DIFF-OPTIONS?
*/
void stash_cmd(void){
  const char *zDb;
  const char *zCmd;
  int nCmd;
  int stashid = 0;
  undo_capture_command_line();
  db_must_be_within_tree();
  db_open_config(0, 0);
  db_begin_transaction();
  zDb = db_name("localdb");
  db_multi_exec(zStashInit /*works-like:"%w,%w"*/, zDb, zDb);
  if( g.argc<=2 ){
    zCmd = "save";
  }else{
    zCmd = g.argv[2];
                  "(SELECT origname FROM stashfile WHERE stashid=%d)",
                  stashid);
    undo_finish();
  }else
  if( memcmp(zCmd, "diff", nCmd)==0
   || memcmp(zCmd, "gdiff", nCmd)==0
   || memcmp(zCmd, "show", nCmd)==0

  ){
    const char *zDiffCmd = 0;
    const char *zBinGlob = 0;
    int fIncludeBinary = 0;
    u64 diffFlags;

    if( find_option("tk",0,0)!=0 ){
      db_close(0);





      diff_tk((zCmd[0]=='s' ? "stash show" : "stash diff"), 3);



      return;
    }
    if( find_option("internal","i",0)==0 ){
      zDiffCmd = diff_command_external(memcmp(zCmd, "gdiff", nCmd)==0);
    }
    diffFlags = diff_options();
    if( find_option("verbose","v",0)!=0 ) diffFlags |= DIFF_VERBOSE;
                  "(SELECT origname FROM stashfile WHERE stashid=%d)",
                  stashid);
    undo_finish();
  }else
  if( memcmp(zCmd, "diff", nCmd)==0
   || memcmp(zCmd, "gdiff", nCmd)==0
   || memcmp(zCmd, "show", nCmd)==0
   || memcmp(zCmd, "cat", nCmd)==0
  ){
    const char *zDiffCmd = 0;
    const char *zBinGlob = 0;
    int fIncludeBinary = 0;
    u64 diffFlags;

    if( find_option("tk",0,0)!=0 ){
      db_close(0);
        switch (zCmd[0]) {
        case 's':
        case 'c':
          diff_tk("stash show", 3);
          break;

        default:
          diff_tk("stash diff", 3);
        }
      return;
    }
    if( find_option("internal","i",0)==0 ){
      zDiffCmd = diff_command_external(memcmp(zCmd, "gdiff", nCmd)==0);
    }
    diffFlags = diff_options();
    if( find_option("verbose","v",0)!=0 ) diffFlags |= DIFF_VERBOSE;
Changes to src/stat.c.
  if( g.perm.Admin ){
    style_submenu_element("URLs", "URLs and Checkouts", "urllist");
    style_submenu_element("Schema", "Repository Schema", "repo_schema");
    style_submenu_element("Web-Cache", "Web-Cache Stats", "cachestat");
  }
  style_submenu_element("Activity Reports", 0, "reports");
  style_submenu_element("SHA1 Collisions", 0, "hash-collisions");
  if( sqlite3_libversion_number()>=3008010 ){
    style_submenu_element("Table Sizes", 0, "repo-tabsize");
  }
  @ <table class="label-value">
  @ <tr><th>Repository&nbsp;Size:</th><td>
  fsize = file_size(g.zRepositoryName);
  bigSizeName(sizeof(zBuf), zBuf, fsize);
  @ %s(zBuf)

  if( g.perm.Admin ){
    style_submenu_element("URLs", "URLs and Checkouts", "urllist");
    style_submenu_element("Schema", "Repository Schema", "repo_schema");
    style_submenu_element("Web-Cache", "Web-Cache Stats", "cachestat");
  }
  style_submenu_element("Activity Reports", 0, "reports");
  style_submenu_element("SHA1 Collisions", 0, "hash-collisions");
  if( sqlite3_compileoption_used("ENABLE_DBSTAT_VTAB") ){
    style_submenu_element("Table Sizes", 0, "repo-tabsize");
  }
  @ <table class="label-value">
  @ <tr><th>Repository&nbsp;Size:</th><td>
  fsize = file_size(g.zRepositoryName);
  bigSizeName(sizeof(zBuf), zBuf, fsize);
  @ %s(zBuf)
Changes to src/statrep.c.
/*
** Copyright (c) 2013 Stephen Beal
**
** This program is free software; you can redistribute it and/or
** modify it under the terms of the Simplified BSD License (also
** known as the "2-Clause License" or "FreeBSD License".)

** This program is distributed in the hope that it will be useful,
** but without any warranty; without even the implied warranty of
/*
** Copyright (c) 2013 Stephan Beal
**
** This program is free software; you can redistribute it and/or
** modify it under the terms of the Simplified BSD License (also
** known as the "2-Clause License" or "FreeBSD License".)

** This program is distributed in the hope that it will be useful,
** but without any warranty; without even the implied warranty of
        showYearTotal = *zPrevYear;
        if(showYearTotal){
          rowClass = ++nRowNumber % 2;
          @ <tr class='row%d(rowClass)'>
          @ <td></td>
          @ <td colspan='2'>Yearly total: %d(nEventsPerYear)</td>
          @</tr>

        }
        nEventsPerYear = 0;
        memcpy(zPrevYear,zTimeframe,4);
        rowClass = ++nRowNumber % 2;
        @ <tr class='row%d(rowClass)'>
        @ <th colspan='3' class='statistics-report-row-year'>%s(zPrevYear)</th>
        @ </tr>

        showYearTotal = *zPrevYear;
        if(showYearTotal){
          rowClass = ++nRowNumber % 2;
          @ <tr class='row%d(rowClass)'>
          @ <td></td>
          @ <td colspan='2'>Yearly total: %d(nEventsPerYear)</td>
          @</tr>
          showYearTotal = 0;
        }
        nEventsPerYear = 0;
        memcpy(zPrevYear,zTimeframe,4);
        rowClass = ++nRowNumber % 2;
        @ <tr class='row%d(rowClass)'>
        @ <th colspan='3' class='statistics-report-row-year'>%s(zPrevYear)</th>
        @ </tr>
                                        row colors */
  int nMaxEvents = 1;                /* max number of events for
                                        all rows. */
  stats_report_init_view();
  @ <h1>Timeline Events
  @ (%s(stats_report_label_for_type())) by User</h1>
  db_multi_exec(
    "CREATE TEMP TABLE piechart(amt,label);"
    "INSERT INTO piechart SELECT count(*), ifnull(euser,user) FROM v_reports"
                         " GROUP BY ifnull(euser,user) ORDER BY count(*) DESC;"
  );
  if( db_int(0, "SELECT count(*) FROM piechart")>=2 ){
    @ <center><svg width=700 height=400>
    piechart_render(700, 400, PIE_OTHER|PIE_PERCENT);
    @ </svg></centre><hr/>
  }

                                        row colors */
  int nMaxEvents = 1;                /* max number of events for
                                        all rows. */
  stats_report_init_view();
  @ <h1>Timeline Events
  @ (%s(stats_report_label_for_type())) by User</h1>
  db_multi_exec(
    "CREATE TEMP VIEW piechart(amt,label) AS"
    " SELECT count(*), ifnull(euser,user) FROM v_reports"
                         " GROUP BY ifnull(euser,user) ORDER BY count(*) DESC;"
  );
  if( db_int(0, "SELECT count(*) FROM piechart")>=2 ){
    @ <center><svg width=700 height=400>
    piechart_render(700, 400, PIE_OTHER|PIE_PERCENT);
    @ </svg></centre><hr/>
  }
  int nEventTotal = 0;               /* Total event count */
  int rowClass = 0;                  /* counter for alternating
                                        row colors */
  int nMaxEvents = 1;                /* max number of events for
                                        all rows. */
  Blob userFilter = empty_blob;      /* Optional user=johndoe query string */
  static const char *const daysOfWeek[] = {
  "Monday", "Tuesday", "Wednesday", "Thursday",
  "Friday", "Saturday", "Sunday"
  };

  stats_report_init_view();
  if( zUserName ){
    blob_appendf(&userFilter, "user=%s", zUserName);
  }
  db_prepare(&query,
               "SELECT cast(mtime %% 7 AS INTEGER) dow,"
               "       COUNT(*) AS eventCount"
               "  FROM v_reports"
               " WHERE ifnull(coalesce(euser,user,'')=%Q,1)"
               " GROUP BY dow ORDER BY dow", zUserName);
  @ <h1>Timeline Events (%h(stats_report_label_for_type())) by Day of the Week
  if( zUserName ){
    @ for user %h(zUserName)
  }
  @ </h1>
  db_multi_exec(
    "CREATE TEMP TABLE piechart(amt,label);"
    "INSERT INTO piechart"
    " SELECT count(*), cast(mtime %% 7 AS INT) FROM v_reports"
     " WHERE ifnull(coalesce(euser,user,'')=%Q,1)"
     " GROUP BY 2 ORDER BY 2;"
    "UPDATE piechart SET label = CASE label"

    "  WHEN 0 THEN 'Monday'"
    "  WHEN 1 THEN 'Tuesday'"
    "  WHEN 2 THEN 'Wednesday'"
    "  WHEN 3 THEN 'Thursday'"
    "  WHEN 4 THEN 'Friday'"
    "  WHEN 5 THEN 'Saturday'"





    "  ELSE 'Sunday' END;", zUserName
  );
  if( db_int(0, "SELECT count(*) FROM piechart")>=2 ){
    @ <center><svg width=700 height=400>
    piechart_render(700, 400, PIE_OTHER|PIE_PERCENT);
    @ </svg></centre><hr/>
  }
  @ <table class='statistics-report-table-events' border='0'

  int nEventTotal = 0;               /* Total event count */
  int rowClass = 0;                  /* counter for alternating
                                        row colors */
  int nMaxEvents = 1;                /* max number of events for
                                        all rows. */
  Blob userFilter = empty_blob;      /* Optional user=johndoe query string */
  static const char *const daysOfWeek[] = {
  "Sunday", "Monday", "Tuesday", "Wednesday",
  "Thursday", "Friday", "Saturday"
  };

  stats_report_init_view();
  if( zUserName ){
    blob_appendf(&userFilter, "user=%s", zUserName);
  }
  db_prepare(&query,
               "SELECT cast(strftime('%%w', mtime) AS INTEGER) dow,"
               "       COUNT(*) AS eventCount"
               "  FROM v_reports"
               " WHERE ifnull(coalesce(euser,user,'')=%Q,1)"
               " GROUP BY dow ORDER BY dow", zUserName);
  @ <h1>Timeline Events (%h(stats_report_label_for_type())) by Day of the Week
  if( zUserName ){
    @ for user %h(zUserName)
  }
  @ </h1>
  db_multi_exec(
    "CREATE TEMP VIEW piechart(amt,label) AS"

    " SELECT count(*),"


    "   CASE cast(strftime('%%w', mtime) AS INT)"
    "    WHEN 0 THEN 'Sunday'"
    "    WHEN 1 THEN 'Monday'"
    "    WHEN 2 THEN 'Tuesday'"
    "    WHEN 3 THEN 'Wednesday'"
    "    WHEN 4 THEN 'Thursday'"
    "    WHEN 5 THEN 'Friday'"
    "    WHEN 6 THEN 'Saturday'"
    "    ELSE 'ERROR'"
    "   END"
    "  FROM v_reports"
    "  WHERE ifnull(coalesce(euser,user,'')=%Q,1)"
    "  GROUP BY 2 ORDER BY cast(strftime('%%w', mtime) AS INT);"
    , zUserName
  );
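  /* Editor's note (not part of this check-in): strftime('%w', mtime) yields
  ** '0' for Sunday through '6' for Saturday, which is why the daysOfWeek[]
  ** array above now begins with "Sunday". */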
  if( db_int(0, "SELECT count(*) FROM piechart")>=2 ){
    @ <center><svg width=700 height=400>
    piechart_render(700, 400, PIE_OTHER|PIE_PERCENT);
    @ </svg></centre><hr/>
  }
  @ <table class='statistics-report-table-events' border='0'
**
** view=byweek:
**
**   y=YYYY            The year to report (default is the server's
**                     current year).
*/
void stats_report_page(){
  HQuery url;                        /* URL for various branch links */
  const char *zView = P("view");     /* Which view/report to show. */
  int eType = RPT_NONE;              /* Numeric code for view/report to show */
  int i;                             /* Loop counter */
  const char *zUserName;             /* Name of user */
  const char *azView[16];            /* Drop-down menu of view types */
  static const struct {
    const char *zName;  /* Name of view= screen type */

**
** view=byweek:
**
**   y=YYYY            The year to report (default is the server's
**                     current year).
*/
void stats_report_page(){

  const char *zView = P("view");     /* Which view/report to show. */
  int eType = RPT_NONE;              /* Numeric code for view/report to show */
  int i;                             /* Loop counter */
  const char *zUserName;             /* Name of user */
  const char *azView[16];            /* Drop-down menu of view types */
  static const struct {
    const char *zName;  /* Name of view= screen type */
  }
  for(i=0; i<ArraySize(aViewType); i++){
    if( fossil_strcmp(zView, aViewType[i].zVal)==0 ){
      eType = aViewType[i].eType;
      break;
    }
  }
  url_initialize(&url, "reports");
  cgi_query_parameters_to_url(&url);
  if( eType!=RPT_NONE ){
    int nView = 0;                     /* Slots used in azView[] */
    for(i=0; i<ArraySize(aViewType); i++){
      azView[nView++] = aViewType[i].zVal;
      azView[nView++] = aViewType[i].zName;
    }
    if( eType!=RPT_BYFILE ){
      style_submenu_multichoice("type", ArraySize(azType)/2, azType, 0);
    }
    style_submenu_multichoice("view", nView/2, azView, 0);
    if( eType!=RPT_BYUSER ){
      style_submenu_sql("u","User:",
         "SELECT '', 'All Users' UNION ALL "
         "SELECT x, x FROM ("
         "  SELECT DISTINCT trim(coalesce(euser,user)) AS x FROM event %s"
         "  ORDER BY 1 COLLATE nocase) WHERE x!=''",
         eType==RPT_BYFILE ? "WHERE type='ci'" : ""
      );
    }
  }
  style_submenu_element("Stats", "Stats", "%R/stat");
  url_reset(&url);
  style_header("Activity Reports");
  switch( eType ){
    case RPT_BYYEAR:
      stats_report_by_month_year(0, 0, zUserName);
      break;
    case RPT_BYMONTH:
      stats_report_by_month_year(1, 0, zUserName);

  }
  for(i=0; i<ArraySize(aViewType); i++){
    if( fossil_strcmp(zView, aViewType[i].zVal)==0 ){
      eType = aViewType[i].eType;
      break;
    }
  }


  if( eType!=RPT_NONE ){
    int nView = 0;                     /* Slots used in azView[] */
    for(i=0; i<ArraySize(aViewType); i++){
      azView[nView++] = aViewType[i].zVal;
      azView[nView++] = aViewType[i].zName;
    }
    if( eType!=RPT_BYFILE ){
      style_submenu_multichoice("type", ArraySize(azType)/2, azType, 0);
    }
    style_submenu_multichoice("view", nView/2, azView, 0);
    if( eType!=RPT_BYUSER ){
      style_submenu_sql("user","User:",
         "SELECT '', 'All Users' UNION ALL "
         "SELECT x, x FROM ("
         "  SELECT DISTINCT trim(coalesce(euser,user)) AS x FROM event %s"
         "  ORDER BY 1 COLLATE nocase) WHERE x!=''",
         eType==RPT_BYFILE ? "WHERE type='ci'" : ""
      );
    }
  }
  style_submenu_element("Stats", "Stats", "%R/stat");

  style_header("Activity Reports");
  switch( eType ){
    case RPT_BYYEAR:
      stats_report_by_month_year(0, 0, zUserName);
      break;
    case RPT_BYMONTH:
      stats_report_by_month_year(1, 0, zUserName);
Changes to src/style.c.
*/
static void url_var(
  const char *zVarPrefix,
  const char *zConfigName,
  const char *zPageName
){
  char *zVarName = mprintf("%s_url", zVarPrefix);
  char *zUrl = mprintf("%s/%s?id=%x", g.zTop, zPageName,
                       skin_id(zConfigName));
  Th_Store(zVarName, zUrl);
  free(zUrl);
  free(zVarName);
}

/*
*/
static void url_var(
  const char *zVarPrefix,
  const char *zConfigName,
  const char *zPageName
){
  char *zVarName = mprintf("%s_url", zVarPrefix);
  char *zUrl = mprintf("%R/%s?id=%x", zPageName,
                       skin_id(zConfigName));
  Th_Store(zVarName, zUrl);
  free(zUrl);
  free(zVarName);
}

/*
  static const char *const azCgiVars[] = {
    "COMSPEC", "DOCUMENT_ROOT", "GATEWAY_INTERFACE",
    "HTTP_ACCEPT", "HTTP_ACCEPT_CHARSET", "HTTP_ACCEPT_ENCODING",
    "HTTP_ACCEPT_LANGUAGE", "HTTP_CONNECTION", "HTTP_HOST",
    "HTTP_USER_AGENT", "HTTP_REFERER", "PATH_INFO", "PATH_TRANSLATED",
    "QUERY_STRING", "REMOTE_ADDR", "REMOTE_PORT", "REQUEST_METHOD",
    "REQUEST_URI", "SCRIPT_FILENAME", "SCRIPT_NAME", "SERVER_PROTOCOL",



  };

  login_check_credentials();
  if( !g.perm.Admin && !g.perm.Setup && !db_get_boolean("test_env_enable",0) ){
    login_needed(0);
    return;
  }
  static const char *const azCgiVars[] = {
    "COMSPEC", "DOCUMENT_ROOT", "GATEWAY_INTERFACE",
    "HTTP_ACCEPT", "HTTP_ACCEPT_CHARSET", "HTTP_ACCEPT_ENCODING",
    "HTTP_ACCEPT_LANGUAGE", "HTTP_CONNECTION", "HTTP_HOST",
    "HTTP_USER_AGENT", "HTTP_REFERER", "PATH_INFO", "PATH_TRANSLATED",
    "QUERY_STRING", "REMOTE_ADDR", "REMOTE_PORT", "REQUEST_METHOD",
    "REQUEST_URI", "SCRIPT_FILENAME", "SCRIPT_NAME", "SERVER_PROTOCOL",
    "HOME", "FOSSIL_HOME", "USERNAME", "USER", "FOSSIL_USER",
    "SQLITE_TMPDIR", "TMPDIR",
    "TEMP", "TMP", "FOSSIL_VFS"
  };

  login_check_credentials();
  if( !g.perm.Admin && !g.perm.Setup && !db_get_boolean("test_env_enable",0) ){
    login_needed(0);
    return;
  }
Changes to src/sync.c.
    return 0;
  }
  if( flags==SYNC_PUSH && db_get_boolean("dont-push",0) ){
    return 0;
  }
  zAutosync = db_get("autosync", 0);
  if( zAutosync ){
    if( (flags & SYNC_PUSH)!=0 && memcmp(zAutosync,"pull",4)==0 ){
      return 0;   /* Do not auto-push when autosync=pullonly */
    }
    if( is_false(zAutosync) ){
      return 0;   /* Autosync is completely off */
    }
  }else{
    /* Autosync defaults on.  To make it default off, "return" here. */
    return 0;
  }
  if( flags==SYNC_PUSH && db_get_boolean("dont-push",0) ){
    return 0;
  }
  zAutosync = db_get("autosync", 0);
  if( zAutosync ){
    if( (flags & SYNC_PUSH)!=0 && fossil_strncmp(zAutosync,"pull",4)==0 ){
      return 0;   /* Do not auto-push when autosync=pullonly */
    }
    if( is_false(zAutosync) ){
      return 0;   /* Autosync is completely off */
    }
  }else{
    /* Autosync defaults on.  To make it default off, "return" here. */
  */
  if( find_option("verily",0,0)!=0 ){
    *pSyncFlags |= SYNC_RESYNC;
  }
  url_proxy_options();
  clone_ssh_find_options();
  db_find_and_open_repository(0, 0);
  db_open_config(0);
  if( g.argc==2 ){
    if( db_get_boolean("auto-shun",1) ) configSync = CONFIGSET_SHUN;
  }else if( g.argc==3 ){
    zUrl = g.argv[2];
  }
  if( urlFlags & URL_REMEMBER ){
    clone_ssh_db_set_options();






  */
  if( find_option("verily",0,0)!=0 ){
    *pSyncFlags |= SYNC_RESYNC;
  }
  url_proxy_options();
  clone_ssh_find_options();
  db_find_and_open_repository(0, 0);
  db_open_config(0, 0);
  if( g.argc==2 ){
    if( db_get_boolean("auto-shun",1) ) configSync = CONFIGSET_SHUN;
  }else if( g.argc==3 ){
    zUrl = g.argv[2];
  }
  if( urlFlags & URL_REMEMBER ){
    clone_ssh_db_set_options();
}

/*
** COMMAND: pull
**
** Usage: %fossil pull ?URL? ?options?
**
** Pull changes from a remote repository into the local repository.

** Use the "-R REPO" or "--repository REPO" command-line options
** to specify an alternative repository file.
**
** See clone usage for possible URL formats.
**
** If the URL is not specified, then the URL from the most recent
** clone, push, pull, remote-url, or sync command is used.

**
** The URL specified normally becomes the new "remote-url" used for
** subsequent push, pull, and sync operations.  However, the "--once"
** command-line option makes the URL a one-time-use URL that is not
** saved.
**





** Use the --private option to pull private branches from the
** remote repository.





**
** See also: clone, push, sync, remote-url
*/
void pull_cmd(void){
  unsigned configFlags = 0;
  unsigned syncFlags = SYNC_PULL;
  process_sync_args(&configFlags, &syncFlags);

  /* We should be done with options.. */
  verify_all_options();

  client_sync(syncFlags, configFlags, 0);
}

/*
** COMMAND: push
**
** Usage: %fossil push ?URL? ?options?
**
** Push changes in the local repository over into a remote repository.

** Use the "-R REPO" or "--repository REPO" command-line options
** to specify an alternative repository file.
**
** See clone usage for possible URL formats.
**
** If the URL is not specified, then the URL from the most recent
** clone, push, pull, remote-url, or sync command is used.

**
** The URL specified normally becomes the new "remote-url" used for
** subsequent push, pull, and sync operations.  However, the "--once"
** command-line option makes the URL a one-time-use URL that is not
** saved.
**





** Use the --private option to push private branches to the
** remote repository.





**
** See also: clone, pull, sync, remote-url
*/
void push_cmd(void){
  unsigned configFlags = 0;
  unsigned syncFlags = SYNC_PUSH;
  process_sync_args(&configFlags, &syncFlags);

  /* We should be done with options.. */






}

/*
** COMMAND: pull
**
** Usage: %fossil pull ?URL? ?options?
**
** Pull all sharable changes from a remote repository into the local repository.
** Sharable changes include public check-ins, and wiki, ticket, and tech-note
** edits.  Add the --private option to pull private branches.  Use the 

** "configuration pull" command to pull website configuration details.

**
** If URL is not specified, then the URL from the most recent clone, push,
** pull, remote-url, or sync command is used.  See "fossil help clone" for
** details on the URL formats.
**



** Options:
**
**   -B|--httpauth USER:PASS    Credentials for the simple HTTP auth protocol,
**                              if required by the remote website
**   --ipv4                     Use only IPv4, not IPv6
**   --once                     Do not remember URL for subsequent syncs
**   --proxy PROXY              Use the specified HTTP proxy
**   --private                  Pull private branches too
**   -R|--repository REPO       Repository to pull into
**   --ssl-identity FILE        Local SSL credentials, if requested by remote
**   --ssh-command SSH          Use SSH as the "ssh" command
**   -v|--verbose               Additional (debugging) output
**   --verily                   Exchange extra information with the remote
**                              to ensure no content is overlooked
**
** See also: clone, config pull, push, remote-url, sync
*/
void pull_cmd(void){
  unsigned configFlags = 0;
  unsigned syncFlags = SYNC_PULL;
  process_sync_args(&configFlags, &syncFlags);

  /* We should be done with options.. */
  verify_all_options();

  client_sync(syncFlags, configFlags, 0);
}

/*
** COMMAND: push
**
** Usage: %fossil push ?URL? ?options?
**
** Push all sharable changes from the local repository to a remote repository.
** Sharable changes include public check-ins, and wiki, ticket, and tech-note
** edits.  Use --private to also push private branches.  Use the 

** "configuration pull" command to push website configuration details.

**
** If URL is not specified, then the URL from the most recent clone, push,
** pull, remote-url, or sync command is used.  See "fossil help clone" for
** details on the URL formats.
**



** Options:
**
**   -B|--httpauth USER:PASS    Credentials for the simple HTTP auth protocol,
**                              if required by the remote website
**   --ipv4                     Use only IPv4, not IPv6
**   --once                     Do not remember URL for subsequent syncs
**   --proxy PROXY              Use the specified HTTP proxy
**   --private                  Push private branches too
**   -R|--repository REPO       Repository to push from
**   --ssl-identity FILE        Local SSL credentials, if requested by remote
**   --ssh-command SSH          Use SSH as the "ssh" command
**   -v|--verbose               Additional (debugging) output
**   --verily                   Exchange extra information with the remote
**                              to ensure no content is overlooked
**
** See also: clone, config push, pull, remote-url, sync
*/
void push_cmd(void){
  unsigned configFlags = 0;
  unsigned syncFlags = SYNC_PUSH;
  process_sync_args(&configFlags, &syncFlags);

  /* We should be done with options.. */

/*
** COMMAND: sync
**
** Usage: %fossil sync ?URL? ?options?
**
** Synchronize the local repository with a remote repository.  This is
** the equivalent of running both "push" and "pull" at the same time.
** Use the "-R REPO" or "--repository REPO" command-line options
** to specify an alternative repository file.
**
** See clone usage for possible URL formats.
**
** If the URL is not specified, then the URL from the most recent
** successful clone, push, pull, remote-url, or sync command is used.

**
** The URL specified normally becomes the new "remote-url" used for
** subsequent push, pull, and sync operations.  However, the "--once"
** command-line option makes the URL a one-time-use URL that is not
** saved.
**





** Use the --private option to sync private branches with the
** remote repository.





**
** See also:  clone, push, pull, remote-url
*/
void sync_cmd(void){
  unsigned configFlags = 0;
  unsigned syncFlags = SYNC_PUSH|SYNC_PULL;
  process_sync_args(&configFlags, &syncFlags);

  /* We should be done with options.. */







/*
** COMMAND: sync
**
** Usage: %fossil sync ?URL? ?options?
**
** Synchronize all sharable changes between the local repository and a


** remote repository.  Sharable changes include public check-ins, and wiki,
** ticket, and tech-note edits.

**
** If URL is not specified, then the URL from the most recent clone, push,
** pull, remote-url, or sync command is used.  See "fossil help clone" for
** details on the URL formats.
**



** Options:
**
**   -B|--httpauth USER:PASS    Credentials for the simple HTTP auth protocol,
**                              if required by the remote website
**   --ipv4                     Use only IPv4, not IPv6
**   --once                     Do not remember URL for subsequent syncs
**   --proxy PROXY              Use the specified HTTP proxy
**   --private                  Pull private branches too
**   -R|--repository REPO       Repository to pull into
**   --ssl-identity FILE        Local SSL credentials, if requested by remote
**   --ssh-command SSH          Use SSH as the "ssh" command
**   -v|--verbose               Additional (debugging) output
**   --verily                   Exchange extra information with the remote
**                              to ensure no content is overlooked
**
** See also: clone, pull, push, remote-url
*/
void sync_cmd(void){
  unsigned configFlags = 0;
  unsigned syncFlags = SYNC_PUSH|SYNC_PULL;
  process_sync_args(&configFlags, &syncFlags);

  /* We should be done with options.. */
** and "sync" commands.
**
** The remote-url is set automatically by a "clone" command or by any
** "sync", "push", or "pull" command that specifies an explicit URL.
** The default remote-url is used by auto-syncing and by "sync", "push",
** "pull" that omit the server URL.
**
** See clone usage for possible URL formats.
**
** See also: clone, push, pull, sync
*/
void remote_url_cmd(void){
  char *zUrl;
  db_find_and_open_repository(0, 0);







** and "sync" commands.
**
** The remote-url is set automatically by a "clone" command or by any
** "sync", "push", or "pull" command that specifies an explicit URL.
** The default remote-url is used by auto-syncing and by "sync", "push",
** "pull" that omit the server URL.
**
** See "fossil help clone" for further information about URL formats
**
** See also: clone, push, pull, sync
*/
void remote_url_cmd(void){
  char *zUrl;
  db_find_and_open_repository(0, 0);

Changes to src/th.c.
/*
** The implementation of the TH core. This file contains the parser, and
** the implementation of the interface in th.h.
*/

#include "config.h"
#include "th.h"
#include <string.h>
#include <assert.h>





















typedef struct Th_Command        Th_Command;
typedef struct Th_Frame          Th_Frame;
typedef struct Th_Variable       Th_Variable;
typedef struct Th_InterpAndList  Th_InterpAndList;

/*









/*
** The implementation of the TH core. This file contains the parser, and
** the implementation of the interface in th.h.
*/

#include "config.h"
#include "th.h"
#include <string.h>
#include <assert.h>

/*
** Values used for element values in the tcl_platform array.
*/

#if !defined(TH_ENGINE)
#  define TH_ENGINE          "TH1"
#endif

#if !defined(TH_PLATFORM)
#  if defined(_WIN32) || defined(WIN32)
#    define TH_PLATFORM      "windows"
#  else
#    define TH_PLATFORM      "unix"
#  endif
#endif

/*
** Forward declarations for structures defined below.
*/

typedef struct Th_Command        Th_Command;
typedef struct Th_Frame          Th_Frame;
typedef struct Th_Variable       Th_Variable;
typedef struct Th_InterpAndList  Th_InterpAndList;

/*
/*
** Return true if variable (zVar, nVar) exists.
*/
int Th_ExistsVar(Th_Interp *interp, const char *zVar, int nVar){
  Th_Variable *pValue = thFindValue(interp, zVar, nVar, 0, 1, 1, 0);
  return pValue && (pValue->zData || pValue->pHash);
}









/*
** String (zVar, nVar) must contain the name of a scalar variable or
** array member. If the variable does not exist it is created. The
** variable is set to the value supplied in string (zValue, nValue).
**
** If (zVar, nVar) refers to an existing array, TH_ERROR is returned






/*
** Return true if variable (zVar, nVar) exists.
*/
int Th_ExistsVar(Th_Interp *interp, const char *zVar, int nVar){
  Th_Variable *pValue = thFindValue(interp, zVar, nVar, 0, 1, 1, 0);
  return pValue && (pValue->zData || pValue->pHash);
}

/*
** Return true if array variable (zVar, nVar) exists.
*/
int Th_ExistsArrayVar(Th_Interp *interp, const char *zVar, int nVar){
  Th_Variable *pValue = thFindValue(interp, zVar, nVar, 0, 1, 1, 0);
  return pValue && !pValue->zData && pValue->pHash;
}

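/*
** Illustrative sketch only -- not part of this check-in.  A hypothetical
** caller showing the difference between the two predicates: creating the
** array member a(1) makes "a" an array (hash table, no scalar data), so
** only Th_ExistsArrayVar() reports true for "a", while the plain scalar
** "x" satisfies Th_ExistsVar() only.
*/
static void example_array_predicates(Th_Interp *interp){
  Th_SetVar(interp, "x", -1, "1", -1);        /* scalar variable   */
  Th_SetVar(interp, "a(1)", -1, "one", -1);   /* array member a(1) */
  assert( Th_ExistsVar(interp, "x", -1) );
  assert( !Th_ExistsArrayVar(interp, "x", -1) );
  assert( Th_ExistsArrayVar(interp, "a", -1) );
}
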
/*
** String (zVar, nVar) must contain the name of a scalar variable or
** array member. If the variable does not exist it is created. The
** variable is set to the value supplied in string (zValue, nValue).
**
** If (zVar, nVar) refers to an existing array, TH_ERROR is returned
  Th_Free(interp, *pzStr);
  *pzStr = zNew;
  *pnStr = nNew;

  return TH_OK;
}













/*
** Delete an interpreter.
*/
void Th_DeleteInterp(Th_Interp *interp){
  assert(interp->pFrame);
  assert(0==interp->pFrame->pCaller);






  Th_Free(interp, *pzStr);
  *pzStr = zNew;
  *pnStr = nNew;

  return TH_OK;
}

/*
** Initialize an interpreter.
*/
static int thInitialize(Th_Interp *interp){
  assert(interp->pFrame);

  Th_SetVar(interp, (char *)"::tcl_platform(engine)", -1, TH_ENGINE, -1);
  Th_SetVar(interp, (char *)"::tcl_platform(platform)", -1, TH_PLATFORM, -1);

  return TH_OK;
}

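/*
** Illustrative sketch only -- not part of this check-in.  With the
** initialization above in place, a script can read the new entries; the
** hypothetical helper below evaluates [set ::tcl_platform(engine)], and
** the interpreter result is expected to be "TH1".
*/
static void example_tcl_platform(Th_Interp *interp){
  int n = 0;
  const char *zEngine;
  Th_Eval(interp, 0, "set ::tcl_platform(engine)", -1);
  zEngine = Th_GetResult(interp, &n);   /* expected: "TH1", n==3 */
  (void)zEngine; (void)n;
}
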
/*
** Delete an interpreter.
*/
void Th_DeleteInterp(Th_Interp *interp){
  assert(interp->pFrame);
  assert(0==interp->pFrame->pCaller);
  /* Allocate and initialise the interpreter and the global frame */
  p = pVtab->xMalloc(sizeof(Th_Interp) + sizeof(Th_Frame));
  memset(p, 0, sizeof(Th_Interp));
  p->pVtab = pVtab;
  p->paCmd = Th_HashNew(p);
  thPushFrame(p, (Th_Frame *)&p[1]);


  return p;
}

/*
** These two types are used only by the expression module, where
** the expression module means the Th_Expr() and exprXXX() functions.






  /* Allocate and initialise the interpreter and the global frame */
  p = pVtab->xMalloc(sizeof(Th_Interp) + sizeof(Th_Frame));
  memset(p, 0, sizeof(Th_Interp));
  p->pVtab = pVtab;
  p->paCmd = Th_HashNew(p);
  thPushFrame(p, (Th_Frame *)&p[1]);
  thInitialize(p);

  return p;
}

/*
** These two types are used only by the expression module, where
** the expression module means the Th_Expr() and exprXXX() functions.





























}

/*
** Appends all currently registered command names to the specified list
** and returns TH_OK upon success.  Any other return value indicates an
** error.
*/
int Th_ListAppendCommands(Th_Interp *interp, char **pzList, int *pnList){




  Th_InterpAndList *p = (Th_InterpAndList *)Th_Malloc(
    interp, sizeof(Th_InterpAndList)
  );
  p->interp = interp;
  p->pzList = pzList;
  p->pnList = pnList;
  Th_HashIterate(interp, interp->paCmd, thListAppendHashKey, p);
  Th_Free(interp, p);
  return TH_OK;
}

/*
** Appends all variable names for the current frame to the specified list
** and returns TH_OK upon success.  Any other return value indicates an
** error.  If the current frame cannot be obtained, TH_ERROR is returned.
*/
int Th_ListAppendVariables(Th_Interp *interp, char **pzList, int *pnList){




  Th_Frame *pFrame = getFrame(interp, 0);
  if( pFrame ){
    Th_InterpAndList *p = (Th_InterpAndList *)Th_Malloc(
      interp, sizeof(Th_InterpAndList)
    );
    p->interp = interp;
    p->pzList = pzList;
    p->pnList = pnList;
    Th_HashIterate(interp, pFrame->paVar, thListAppendHashKey, p);
    Th_Free(interp, p);
    return TH_OK;
  }else{
    return TH_ERROR;
  }
}



































}

/*
** Appends all currently registered command names to the specified list
** and returns TH_OK upon success.  Any other return value indicates an
** error.
*/
int Th_ListAppendCommands(
  Th_Interp *interp,      /* Interpreter context */
  char **pzList,          /* OUT: List of command names */
  int *pnList             /* OUT: Number of command names */
){
  Th_InterpAndList *p = (Th_InterpAndList *)Th_Malloc(
    interp, sizeof(Th_InterpAndList)
  );
  p->interp = interp;
  p->pzList = pzList;
  p->pnList = pnList;
  Th_HashIterate(interp, interp->paCmd, thListAppendHashKey, p);
  Th_Free(interp, p);
  return TH_OK;
}

/*
** Appends all variable names for the current frame to the specified list
** and returns TH_OK upon success.  Any other return value indicates an
** error.  If the current frame cannot be obtained, TH_ERROR is returned.
*/
int Th_ListAppendVariables(
  Th_Interp *interp,      /* Interpreter context */
  char **pzList,          /* OUT: List of variable names */
  int *pnList             /* OUT: Number of variable names */
){
  Th_Frame *pFrame = getFrame(interp, 0);
  if( pFrame ){
    Th_InterpAndList *p = (Th_InterpAndList *)Th_Malloc(
      interp, sizeof(Th_InterpAndList)
    );
    p->interp = interp;
    p->pzList = pzList;
    p->pnList = pnList;
    Th_HashIterate(interp, pFrame->paVar, thListAppendHashKey, p);
    Th_Free(interp, p);
    return TH_OK;
  }else{
    return TH_ERROR;
  }
}

/*
** Appends all array element names for the specified array variable to the
** specified list and returns TH_OK upon success.  Any other return value
** indicates an error.
*/
int Th_ListAppendArray(
  Th_Interp *interp,      /* Interpreter context */
  const char *zVar,       /* Pointer to variable name */
  int nVar,               /* Number of bytes at nVar */
  char **pzList,          /* OUT: List of array element names */
  int *pnList             /* OUT: Number of array element names */
){
  Th_Variable *pValue = thFindValue(interp, zVar, nVar, 0, 1, 1, 0);
  if( pValue && !pValue->zData && pValue->pHash ){
    Th_InterpAndList *p = (Th_InterpAndList *)Th_Malloc(
      interp, sizeof(Th_InterpAndList)
    );
    p->interp = interp;
    p->pzList = pzList;
    p->pnList = pnList;
    Th_HashIterate(interp, pValue->pHash, thListAppendHashKey, p);
    Th_Free(interp, p);
  }else{
    *pzList = 0;
    *pnList = 0;
  }
  return TH_OK;
}
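
/*
** Illustrative sketch only -- not part of this check-in.  A hypothetical
** caller of the new Th_ListAppendArray(): collect the element names of
** array "a" and hand them back as the interpreter result, mirroring what
** the [array names] command in th_lang.c does with it.
*/
static void example_array_names(Th_Interp *interp){
  char *zList = 0;
  int nList = 0;
  if( Th_ListAppendArray(interp, "a", -1, &zList, &nList)==TH_OK ){
    Th_SetResult(interp, zList, nList);
    if( zList ) Th_Free(interp, zList);
  }
}
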
Changes to src/th.h.
int Th_Expr(Th_Interp *interp, const char *, int);

/*
** Access TH variables in the current stack frame. If the variable name
** begins with "::", the lookup is in the top level (global) frame.
*/
int Th_ExistsVar(Th_Interp *, const char *, int);

int Th_GetVar(Th_Interp *, const char *, int);
int Th_SetVar(Th_Interp *, const char *, int, const char *, int);
int Th_LinkVar(Th_Interp *, const char *, int, int, const char *, int);
int Th_UnsetVar(Th_Interp *, const char *, int);

typedef int (*Th_CommandProc)(Th_Interp *, void *, int, const char **, int *);







int Th_Expr(Th_Interp *interp, const char *, int);

/*
** Access TH variables in the current stack frame. If the variable name
** begins with "::", the lookup is in the top level (global) frame.
*/
int Th_ExistsVar(Th_Interp *, const char *, int);
int Th_ExistsArrayVar(Th_Interp *, const char *, int);
int Th_GetVar(Th_Interp *, const char *, int);
int Th_SetVar(Th_Interp *, const char *, int, const char *, int);
int Th_LinkVar(Th_Interp *, const char *, int, int, const char *, int);
int Th_UnsetVar(Th_Interp *, const char *, int);

typedef int (*Th_CommandProc)(Th_Interp *, void *, int, const char **, int *);

int Th_SetResultDouble(Th_Interp *, double);

/*
** Functions for handling command and variable introspection.
*/
int Th_ListAppendCommands(Th_Interp *, char **, int *);
int Th_ListAppendVariables(Th_Interp *, char **, int *);


/*
** Drop in replacements for the corresponding standard library functions.
*/
int th_strlen(const char *);
int th_isdigit(char);
int th_isspace(char);






int Th_SetResultDouble(Th_Interp *, double);

/*
** Functions for handling command and variable introspection.
*/
int Th_ListAppendCommands(Th_Interp *, char **, int *);
int Th_ListAppendVariables(Th_Interp *, char **, int *);
int Th_ListAppendArray(Th_Interp *, const char *, int, char **, int *);

/*
** Drop in replacements for the corresponding standard library functions.
*/
int th_strlen(const char *);
int th_isdigit(char);
int th_isspace(char);
Changes to src/th_lang.c.
      argl[2];    /* Space for copies of parameter names and default values */
  p = (ProcDefn *)Th_Malloc(interp, nByte);

  /* If the last parameter in the parameter list is "args", then set the
  ** ProcDefn.hasArgs flag. The "args" parameter does not require an
  ** entry in the ProcDefn.azParam[] or ProcDefn.azDefault[] arrays.
  */

  if( anParam[nParam-1]==4 && 0==memcmp(azParam[nParam-1], "args", 4) ){
    p->hasArgs = 1;
    nParam--;

  }

  p->nParam    = nParam;
  p->azParam   = (char **)&p[1];
  p->anParam   = (int *)&p->azParam[nParam];
  p->azDefault = (char **)&p->anParam[nParam];
  p->anDefault = (int *)&p->azDefault[nParam];






      argl[2];    /* Space for copies of parameter names and default values */
  p = (ProcDefn *)Th_Malloc(interp, nByte);

  /* If the last parameter in the parameter list is "args", then set the
  ** ProcDefn.hasArgs flag. The "args" parameter does not require an
  ** entry in the ProcDefn.azParam[] or ProcDefn.azDefault[] arrays.
  */
  if( nParam>0 ){
    if( anParam[nParam-1]==4 && 0==memcmp(azParam[nParam-1], "args", 4) ){
      p->hasArgs = 1;
      nParam--;
    }
  }

  p->nParam    = nParam;
  p->azParam   = (char **)&p[1];
  p->anParam   = (int *)&p->azParam[nParam];
  p->azDefault = (char **)&p->anParam[nParam];
  p->anDefault = (int *)&p->azDefault[nParam];
  if( zElem ) Th_Free(interp, zElem);
  return TH_OK;
}

/*
** TH Syntax:
**










































**   unset VAR
*/
static int unset_command(
  Th_Interp *interp,
  void *ctx,
  int argc,
  const char **argv,
  int *argl






  if( zElem ) Th_Free(interp, zElem);
  return TH_OK;
}

/*
** TH Syntax:
**
**   array exists VARNAME
*/
static int array_exists_command(
  Th_Interp *interp, void *ctx, int argc, const char **argv, int *argl
){
  int rc;

  if( argc!=3 ){
    return Th_WrongNumArgs(interp, "array exists var");
  }
  rc = Th_ExistsArrayVar(interp, argv[2], argl[2]);
  Th_SetResultInt(interp, rc);
  return TH_OK;
}

/*
** TH Syntax:
**
**   array names VARNAME
*/
static int array_names_command(
  Th_Interp *interp, void *ctx, int argc, const char **argv, int *argl
){
  int rc;
  char *zElem = 0;
  int nElem = 0;

  if( argc!=3 ){
    return Th_WrongNumArgs(interp, "array names varname");
  }
  rc = Th_ListAppendArray(interp, argv[2], argl[2], &zElem, &nElem);
  if( rc!=TH_OK ){
    return rc;
  }
  Th_SetResult(interp, zElem, nElem);
  if( zElem ) Th_Free(interp, zElem);
  return TH_OK;
}

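/*
** Illustrative sketch only -- not part of this check-in.  Script-level use
** of the new subcommands through a hypothetical helper: after the script
** below runs, the interpreter result is the list of element names of
** array a (e.g. "1 2"), and [array exists a] would similarly leave 1.
*/
static void example_array_script(Th_Interp *interp){
  Th_Eval(interp, 0,
    "set a(1) one\n"
    "set a(2) two\n"
    "array names a", -1);
}
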
/*
** TH Syntax:
**
**   unset VARNAME
*/
static int unset_command(
  Th_Interp *interp,
  void *ctx,
  int argc,
  const char **argv,
  int *argl
  int *argl
){
  static const Th_SubCommand aSub[] = {
    { "commands", info_commands_command },
    { "exists",   info_exists_command },
    { "vars",     info_vars_command },
    { 0, 0 }





















  };
  return Th_CallSubCommand(interp, ctx, argc, argv, argl, aSub);
}

/*
** Convert the script level frame specification (used by the commands
** [uplevel] and [upvar]) in (zFrame, nFrame) to an integer frame as






  int *argl
){
  static const Th_SubCommand aSub[] = {
    { "commands", info_commands_command },
    { "exists",   info_exists_command },
    { "vars",     info_vars_command },
    { 0, 0 }
  };
  return Th_CallSubCommand(interp, ctx, argc, argv, argl, aSub);
}

/*
** TH Syntax:
**
**   array exists VARNAME
**   array names VARNAME
*/
static int array_command(
  Th_Interp *interp,
  void *ctx,
  int argc,
  const char **argv,
  int *argl
){
  static const Th_SubCommand aSub[] = {
    { "exists", array_exists_command },
    { "names",  array_names_command },
    { 0, 0 }
  };
  return Th_CallSubCommand(interp, ctx, argc, argv, argl, aSub);
}

/*
** Convert the script level frame specification (used by the commands
** [uplevel] and [upvar]) in (zFrame, nFrame) to an integer frame as
int th_register_language(Th_Interp *interp){
  /* Array of built-in commands. */
  struct _Command {
    const char *zName;
    Th_CommandProc xProc;
    void *pContext;
  } aCommand[] = {

    {"catch",    catch_command,   0},
    {"expr",     expr_command,    0},
    {"for",      for_command,     0},
    {"if",       if_command,      0},
    {"info",     info_command,    0},
    {"lindex",   lindex_command,  0},
    {"list",     list_command,    0},






int th_register_language(Th_Interp *interp){
  /* Array of built-in commands. */
  struct _Command {
    const char *zName;
    Th_CommandProc xProc;
    void *pContext;
  } aCommand[] = {
    {"array",    array_command,   0},
    {"catch",    catch_command,   0},
    {"expr",     expr_command,    0},
    {"for",      for_command,     0},
    {"if",       if_command,      0},
    {"info",     info_command,    0},
    {"lindex",   lindex_command,  0},
    {"list",     list_command,    0},
Changes to src/th_main.c.
void Th_PrintTraceLog(){
  if( g.thTrace ){
    fossil_print("\n------------------ BEGIN TRACE LOG ------------------\n");
    fossil_print("%s", blob_str(&g.thLog));
    fossil_print("\n------------------- END TRACE LOG -------------------\n");
  }
}






























































































/*
** TH1 command: httpize STRING
**
** Escape all characters of STRING which have special meaning in URI
** components. Return a new string result.
*/






void Th_PrintTraceLog(){
  if( g.thTrace ){
    fossil_print("\n------------------ BEGIN TRACE LOG ------------------\n");
    fossil_print("%s", blob_str(&g.thLog));
    fossil_print("\n------------------- END TRACE LOG -------------------\n");
  }
}

/*
** - adapted from ls_cmd_rev in checkin.c
** - adapted command/error handling for use within TH1
** - interface adapted to allow result creation as a TH1 list
**
** Takes a check-in identifier in zRev and an optional glob pattern in zGlob
** as parameters and returns in pzList/pnList a TH1 list of the filenames in
** that check-in that match the glob pattern.
*/
static void dir_cmd_rev(
  Th_Interp *interp,
  char **pzList,
  int *pnList,
  const char *zRev,  /* Revision string given */
  const char *zGlob, /* Glob pattern given */
  int bDetails
){
  Stmt q;
  char *zOrderBy = "pathname COLLATE nocase";
  int rid;

  rid = th1_name_to_typed_rid(interp, zRev, "ci");
  compute_fileage(rid, zGlob);
  db_prepare(&q,
    "SELECT datetime(fileage.mtime, toLocal()), fileage.pathname,\n"
    "       blob.size\n"
    "  FROM fileage, blob\n"
    " WHERE blob.rid=fileage.fid \n"
    " ORDER BY %s;", zOrderBy /*safe-for-%s*/
  );
  while( db_step(&q)==SQLITE_ROW ){
    const char *zFile = db_column_text(&q, 1);
    if( bDetails ){
      const char *zTime = db_column_text(&q, 0);
      int size = db_column_int(&q, 2);
      char zSize[50];
      char *zSubList = 0;
      int nSubList = 0;
      sqlite3_snprintf(sizeof(zSize), zSize, "%d", size);
      Th_ListAppend(interp, &zSubList, &nSubList, zFile, -1);
      Th_ListAppend(interp, &zSubList, &nSubList, zSize, -1);
      Th_ListAppend(interp, &zSubList, &nSubList, zTime, -1);
      Th_ListAppend(interp, pzList, pnList, zSubList, -1);
      Th_Free(interp, zSubList);
    }else{
      Th_ListAppend(interp, pzList, pnList, zFile, -1);
    }
  }
  db_finalize(&q);
}

/*
** TH1 command: dir CHECKIN ?GLOB? ?DETAILS?
**
** Returns a list containing all files in CHECKIN.  If GLOB is given, only
** the files matching the pattern GLOB within CHECKIN will be returned.
** If DETAILS is non-zero, the result will be a list-of-lists, with each
** element containing at least three elements: the file name, the file
** size (in bytes), and the file last modification time (relative to the
** time zone configured for the repository).
*/
static int dirCmd(
  Th_Interp *interp,
  void *ctx,
  int argc,
  const char **argv,
  int *argl
){
  const char *zGlob = 0;
  int bDetails = 0;

  if( argc<2 || argc>4 ){
    return Th_WrongNumArgs(interp, "dir CHECKIN ?GLOB? ?DETAILS?");
  }
  if( argc>=3 ){
    zGlob = argv[2];
  }
  if( argc>=4 && Th_ToInt(interp, argv[3], argl[3], &bDetails) ){
    return TH_ERROR;
  }
  if( Th_IsRepositoryOpen() ){
    char *zList = 0;
    int nList = 0;
    dir_cmd_rev(interp, &zList, &nList, argv[1], zGlob, bDetails);
    Th_SetResult(interp, zList, nList);
    Th_Free(interp, zList);
    return TH_OK;
  }else{
    Th_SetResult(interp, "repository unavailable", -1);
    return TH_ERROR;
  }
}

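/*
** Illustrative sketch only -- not part of this check-in.  A hypothetical
** TH1 fragment exercising the new [dir] command: with an open repository,
** this leaves a list of {name size mtime} sublists for the C sources of
** the "trunk" check-in as the interpreter result; omitting GLOB and
** DETAILS ("dir trunk") would return plain file names instead.
*/
static void example_dir_script(Th_Interp *interp){
  Th_Eval(interp, 0, "dir trunk src/*.c 1", -1);
}
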
/*
** TH1 command: httpize STRING
**
** Escape all characters of STRING which have special meaning in URI
** components. Return a new string result.
*/
){
  if( argc!=2 ){
    return Th_WrongNumArgs(interp, "puts STRING");
  }
  sendText((char*)argv[1], argl[1], *(unsigned int*)pConvert);
  return TH_OK;
}

































































/*
** TH1 command: markdown STRING
**
** Renders the input string as markdown.  The result is a two-element list.
** The first element is the text-only title string.  The second element
** contains the body, rendered as HTML.






){
  if( argc!=2 ){
    return Th_WrongNumArgs(interp, "puts STRING");
  }
  sendText((char*)argv[1], argl[1], *(unsigned int*)pConvert);
  return TH_OK;
}

/*
** TH1 command: redirect URL
**
** Issues an HTTP redirect (302) to the specified URL and then exits the
** process.
*/
static int redirectCmd(
  Th_Interp *interp,
  void *p,
  int argc,
  const char **argv,
  int *argl
){
  if( argc!=2 ){
    return Th_WrongNumArgs(interp, "redirect URL");
  }
  cgi_redirect(argv[1]);
  Th_SetResult(interp, argv[1], argl[1]); /* NOT REACHED */
  return TH_OK;
}

/*
** TH1 command: insertCsrf
**
** While rendering a form, call this command to add the Anti-CSRF token
** as a hidden element of the form.
*/
static int insertCsrfCmd(
  Th_Interp *interp,
  void *p,
  int argc,
  const char **argv,
  int *argl
){
  if( argc!=1 ){
    return Th_WrongNumArgs(interp, "insertCsrf");
  }
  login_insert_csrf_secret();
  return TH_OK;
}

/*
** TH1 command: verifyCsrf
**
** Before using the results of a form, first call this command to verify
** that this Anti-CSRF token is present and is valid.  If the Anti-CSRF token
** is missing or is incorrect, that indicates a cross-site request forgery
** attack.  In the event an attack is detected, an error message is generated and
** all further processing is aborted.
*/
static int verifyCsrfCmd(
  Th_Interp *interp,
  void *p,
  int argc,
  const char **argv,
  int *argl
){
  if( argc!=1 ){
    return Th_WrongNumArgs(interp, "verifyCsrf");
  }
  login_verify_csrf_secret();
  return TH_OK;
}

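/*
** Illustrative sketch only -- not part of this check-in.  A hypothetical
** TH1 form fragment pairing the new commands: [insertCsrf] emits the
** hidden token while the form is rendered, and the script that handles
** the resulting POST calls [verifyCsrf] before doing any work (after
** which it could use [redirect] to send the browser to another page).
*/
static void example_csrf_form(Th_Interp *interp){
  Th_Eval(interp, 0,
    "html \"<form method='POST'>\"\n"
    "insertCsrf\n"
    "html \"<input type='submit'></form>\"", -1);
}
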
/*
** TH1 command: markdown STRING
**
** Renders the input string as markdown.  The result is a two-element list.
** The first element is the text-only title string.  The second element
** contains the body, rendered as HTML.
    return Th_WrongNumArgs(interp, "htmlize STRING");
  }
  zOut = htmlize((char*)argv[1], argl[1]);
  Th_SetResult(interp, zOut, -1);
  free(zOut);
  return TH_OK;
}























/*
** TH1 command: date
**
** Return a string which is the current time and date.  If the
** -local option is used, the date appears using localtime instead
** of UTC.
*/
static int dateCmd(
  Th_Interp *interp,
  void *p,
  int argc,
  const char **argv,
  int *argl
){
  char *zOut;
  if( argc>=2 && argl[1]==6 && memcmp(argv[1],"-local",6)==0 ){
    zOut = db_text("??", "SELECT datetime('now'%s)", timeline_utc());
  }else{
    zOut = db_text("??", "SELECT datetime('now')");
  }
  Th_SetResult(interp, zOut, -1);
  free(zOut);
  return TH_OK;
}






    return Th_WrongNumArgs(interp, "htmlize STRING");
  }
  zOut = htmlize((char*)argv[1], argl[1]);
  Th_SetResult(interp, zOut, -1);
  free(zOut);
  return TH_OK;
}

/*
** TH1 command: encode64 STRING
**
** Encode the specified string using Base64 and return the result.
*/
static int encode64Cmd(
  Th_Interp *interp,
  void *p,
  int argc,
  const char **argv,
  int *argl
){
  char *zOut;
  if( argc!=2 ){
    return Th_WrongNumArgs(interp, "encode64 STRING");
  }
  zOut = encode64((char*)argv[1], argl[1]);
  Th_SetResult(interp, zOut, -1);
  free(zOut);
  return TH_OK;
}

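/*
** Illustrative sketch only -- not part of this check-in.  Driving the new
** [encode64] command through a hypothetical helper; the interpreter
** result is expected to be the Base64 text "aGVsbG8=".
*/
static void example_encode64(Th_Interp *interp){
  int n = 0;
  const char *z;
  Th_Eval(interp, 0, "encode64 hello", -1);
  z = Th_GetResult(interp, &n);
  (void)z; (void)n;
}
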
/*
** TH1 command: date
**
** Return a string which is the current time and date.  If the
** -local option is used, the date appears using localtime instead
** of UTC.
*/
static int dateCmd(
  Th_Interp *interp,
  void *p,
  int argc,
  const char **argv,
  int *argl
){
  char *zOut;
  if( argc>=2 && argl[1]==6 && memcmp(argv[1],"-local",6)==0 ){
    zOut = db_text("??", "SELECT datetime('now',toLocal())");
  }else{
    zOut = db_text("??", "SELECT datetime('now')");
  }
  Th_SetResult(interp, zOut, -1);
  free(zOut);
  return TH_OK;
}
** TH1 command: hasfeature STRING
**
** Return true if the fossil binary has the given compile-time feature
** enabled. The set of features includes:
**
** "ssl"             = FOSSIL_ENABLE_SSL
** "legacyMvRm"      = FOSSIL_ENABLE_LEGACY_MV_RM

** "th1Docs"         = FOSSIL_ENABLE_TH1_DOCS
** "th1Hooks"        = FOSSIL_ENABLE_TH1_HOOKS
** "tcl"             = FOSSIL_ENABLE_TCL
** "useTclStubs"     = USE_TCL_STUBS
** "tclStubs"        = FOSSIL_ENABLE_TCL_STUBS
** "tclPrivateStubs" = FOSSIL_ENABLE_TCL_PRIVATE_STUBS
** "json"            = FOSSIL_ENABLE_JSON
** "markdown"        = FOSSIL_ENABLE_MARKDOWN
** "unicodeCmdLine"  = !BROKEN_MINGW_CMDLINE
** "dynamicBuild"    = FOSSIL_DYNAMIC_BUILD
**


*/
static int hasfeatureCmd(
  Th_Interp *interp,
  void *p,
  int argc,
  const char **argv,
  int *argl






** TH1 command: hasfeature STRING
**
** Return true if the fossil binary has the given compile-time feature
** enabled. The set of features includes:
**
** "ssl"             = FOSSIL_ENABLE_SSL
** "legacyMvRm"      = FOSSIL_ENABLE_LEGACY_MV_RM
** "execRelPaths"    = FOSSIL_ENABLE_EXEC_REL_PATHS
** "th1Docs"         = FOSSIL_ENABLE_TH1_DOCS
** "th1Hooks"        = FOSSIL_ENABLE_TH1_HOOKS
** "tcl"             = FOSSIL_ENABLE_TCL
** "useTclStubs"     = USE_TCL_STUBS
** "tclStubs"        = FOSSIL_ENABLE_TCL_STUBS
** "tclPrivateStubs" = FOSSIL_ENABLE_TCL_PRIVATE_STUBS
** "json"            = FOSSIL_ENABLE_JSON
** "markdown"        = FOSSIL_ENABLE_MARKDOWN
** "unicodeCmdLine"  = !BROKEN_MINGW_CMDLINE
** "dynamicBuild"    = FOSSIL_DYNAMIC_BUILD
**
** Specifying an unknown feature will return a value of false; it will not
** raise a script error.
*/
static int hasfeatureCmd(
  Th_Interp *interp,
  void *p,
  int argc,
  const char **argv,
  int *argl
    rc = 1;
  }
#endif
#if defined(FOSSIL_ENABLE_LEGACY_MV_RM)
  else if( 0 == fossil_strnicmp( zArg, "legacyMvRm\0", 11 ) ){
    rc = 1;
  }





#endif
#if defined(FOSSIL_ENABLE_TH1_DOCS)
  else if( 0 == fossil_strnicmp( zArg, "th1Docs\0", 8 ) ){
    rc = 1;
  }
#endif
#if defined(FOSSIL_ENABLE_TH1_HOOKS)






    rc = 1;
  }
#endif
#if defined(FOSSIL_ENABLE_LEGACY_MV_RM)
  else if( 0 == fossil_strnicmp( zArg, "legacyMvRm\0", 11 ) ){
    rc = 1;
  }
#endif
#if defined(FOSSIL_ENABLE_EXEC_REL_PATHS)
  else if( 0 == fossil_strnicmp( zArg, "execRelPaths\0", 13 ) ){
    rc = 1;
  }
#endif
#if defined(FOSSIL_ENABLE_TH1_DOCS)
  else if( 0 == fossil_strnicmp( zArg, "th1Docs\0", 8 ) ){
    rc = 1;
  }
#endif
#if defined(FOSSIL_ENABLE_TH1_HOOKS)
    if( Th_IsRepositoryOpen() ){
      g.th1Flags |= TH_STATE_REPOSITORY;
    }else{
      g.th1Flags &= ~TH_STATE_REPOSITORY;
    }
  }
  if( !Th_IsConfigOpen() ){
    db_open_config(0);
    if( Th_IsConfigOpen() ){
      g.th1Flags |= TH_STATE_CONFIG;
    }else{
      g.th1Flags &= ~TH_STATE_CONFIG;
    }
  }
}






    if( Th_IsRepositoryOpen() ){
      g.th1Flags |= TH_STATE_REPOSITORY;
    }else{
      g.th1Flags &= ~TH_STATE_REPOSITORY;
    }
  }
  if( !Th_IsConfigOpen() ){
    db_open_config(0, 1);
    if( Th_IsConfigOpen() ){
      g.th1Flags |= TH_STATE_CONFIG;
    }else{
      g.th1Flags &= ~TH_STATE_CONFIG;
    }
  }
}
    {"anoncap",       hascapCmd,            (void*)&anonFlag},
    {"anycap",        anycapCmd,            0},
    {"artifact",      artifactCmd,          0},
    {"checkout",      checkoutCmd,          0},
    {"combobox",      comboboxCmd,          0},
    {"date",          dateCmd,              0},
    {"decorate",      wikiCmd,              (void*)&aFlags[2]},

    {"enable_output", enableOutputCmd,      0},

    {"getParameter",  getParameterCmd,      0},
    {"glob_match",    globMatchCmd,         0},
    {"globalState",   globalStateCmd,       0},
    {"httpize",       httpizeCmd,           0},
    {"hascap",        hascapCmd,            (void*)&zeroInt},
    {"hasfeature",    hasfeatureCmd,        0},
    {"html",          putsCmd,              (void*)&aFlags[0]},
    {"htmlize",       htmlizeCmd,           0},
    {"http",          httpCmd,              0},

    {"linecount",     linecntCmd,           0},
    {"markdown",      markdownCmd,          0},
    {"puts",          putsCmd,              (void*)&aFlags[1]},
    {"query",         queryCmd,             0},
    {"randhex",       randhexCmd,           0},

    {"regexp",        regexpCmd,            0},
    {"reinitialize",  reinitializeCmd,      0},
    {"render",        renderCmd,            0},
    {"repository",    repositoryCmd,        0},
    {"searchable",    searchableCmd,        0},
    {"setParameter",  setParameterCmd,      0},
    {"setting",       settingCmd,           0},
    {"styleHeader",   styleHeaderCmd,       0},
    {"styleFooter",   styleFooterCmd,       0},
    {"tclReady",      tclReadyCmd,          0},
    {"trace",         traceCmd,             0},
    {"stime",         stimeCmd,             0},
    {"utime",         utimeCmd,             0},

    {"wiki",          wikiCmd,              (void*)&aFlags[0]},
    {0, 0, 0}
  };
  if( g.thTrace ){
    Th_Trace("th1-init 0x%x => 0x%x<br />\n", g.th1Flags, flags);
  }
  if( needConfig ){






    {"anoncap",       hascapCmd,            (void*)&anonFlag},
    {"anycap",        anycapCmd,            0},
    {"artifact",      artifactCmd,          0},
    {"checkout",      checkoutCmd,          0},
    {"combobox",      comboboxCmd,          0},
    {"date",          dateCmd,              0},
    {"decorate",      wikiCmd,              (void*)&aFlags[2]},
    {"dir",           dirCmd,               0},
    {"enable_output", enableOutputCmd,      0},
    {"encode64",      encode64Cmd,          0},
    {"getParameter",  getParameterCmd,      0},
    {"glob_match",    globMatchCmd,         0},
    {"globalState",   globalStateCmd,       0},
    {"httpize",       httpizeCmd,           0},
    {"hascap",        hascapCmd,            (void*)&zeroInt},
    {"hasfeature",    hasfeatureCmd,        0},
    {"html",          putsCmd,              (void*)&aFlags[0]},
    {"htmlize",       htmlizeCmd,           0},
    {"http",          httpCmd,              0},
    {"insertCsrf",    insertCsrfCmd,        0},
    {"linecount",     linecntCmd,           0},
    {"markdown",      markdownCmd,          0},
    {"puts",          putsCmd,              (void*)&aFlags[1]},
    {"query",         queryCmd,             0},
    {"randhex",       randhexCmd,           0},
    {"redirect",      redirectCmd,          0},
    {"regexp",        regexpCmd,            0},
    {"reinitialize",  reinitializeCmd,      0},
    {"render",        renderCmd,            0},
    {"repository",    repositoryCmd,        0},
    {"searchable",    searchableCmd,        0},
    {"setParameter",  setParameterCmd,      0},
    {"setting",       settingCmd,           0},
    {"styleHeader",   styleHeaderCmd,       0},
    {"styleFooter",   styleFooterCmd,       0},
    {"tclReady",      tclReadyCmd,          0},
    {"trace",         traceCmd,             0},
    {"stime",         stimeCmd,             0},
    {"utime",         utimeCmd,             0},
    {"verifyCsrf",    verifyCsrfCmd,        0},
    {"wiki",          wikiCmd,              (void*)&aFlags[0]},
    {0, 0, 0}
  };
  if( g.thTrace ){
    Th_Trace("th1-init 0x%x => 0x%x<br />\n", g.th1Flags, flags);
  }
  if( needConfig ){
** on standard output.
**
** Options:
**
**     --cgi                Include a CGI response header in the output
**     --http               Include an HTTP response header in the output
**     --open-config        Open the configuration database

*/
void test_th_render(void){
  int forceCgi = 0, fullHttpReply = 0;
  Blob in;
  Th_InitTraceLog();
  forceCgi = find_option("cgi", 0, 0)!=0;
  fullHttpReply = find_option("http", 0, 0)!=0;
  if( fullHttpReply ) forceCgi = 1;
  if( forceCgi ) Th_ForceCgi(fullHttpReply);
  if( find_option("open-config", 0, 0)!=0 ){






** on standard output.
**
** Options:
**
**     --cgi                Include a CGI response header in the output
**     --http               Include an HTTP response header in the output
**     --open-config        Open the configuration database
**     --th-trace           Trace TH1 execution (for debugging purposes)
*/
void test_th_render(void){
  int forceCgi, fullHttpReply;
  Blob in;
  Th_InitTraceLog();
  forceCgi = find_option("cgi", 0, 0)!=0;
  fullHttpReply = find_option("http", 0, 0)!=0;
  if( fullHttpReply ) forceCgi = 1;
  if( forceCgi ) Th_ForceCgi(fullHttpReply);
  if( find_option("open-config", 0, 0)!=0 ){
/*
** COMMAND: test-th-eval
**
** Usage: %fossil test-th-eval SCRIPT
**
** Evaluate SCRIPT as if it were a header or footer or ticket rendering
** script, evaluate it, and show the results on standard output.
**
** Options:
**
**     --cgi                Include a CGI response header in the output
**     --http               Include an HTTP response header in the output
**     --open-config        Open the configuration database

*/
void test_th_eval(void){
  int rc;
  const char *zRc;
  int forceCgi, fullHttpReply;
  Th_InitTraceLog();
  forceCgi = find_option("cgi", 0, 0)!=0;
  fullHttpReply = find_option("http", 0, 0)!=0;
  if( fullHttpReply ) forceCgi = 1;
  if( forceCgi ) Th_ForceCgi(fullHttpReply);
  if( find_option("open-config", 0, 0)!=0 ){
    Th_OpenConfig(1);
  }

  if( g.argc!=3 ){
    usage("script");
  }
  Th_FossilInit(TH_INIT_DEFAULT);
  rc = Th_Eval(g.interp, 0, g.argv[2], -1);
  zRc = Th_ReturnCodeName(rc, 1);
  fossil_print("%s%s%s\n", zRc, zRc ? ": " : "", Th_GetResult(g.interp, 0));
  Th_PrintTraceLog();
  if( forceCgi ) cgi_reply();
}












































#ifdef FOSSIL_ENABLE_TH1_HOOKS
/*
** COMMAND: test-th-hook

































*/
void test_th_hook(void){
  int rc = TH_OK;
  int nResult = 0;
  char *zResult;
  int forceCgi, fullHttpReply;
  Th_InitTraceLog();
  forceCgi = find_option("cgi", 0, 0)!=0;
  fullHttpReply = find_option("http", 0, 0)!=0;
  if( fullHttpReply ) forceCgi = 1;
  if( forceCgi ) Th_ForceCgi(fullHttpReply);

  if( g.argc<5 ){
    usage("TYPE NAME FLAGS");
  }
  if( fossil_stricmp(g.argv[2], "cmdhook")==0 ){
    rc = Th_CommandHook(g.argv[3], (char)atoi(g.argv[4]));
  }else if( fossil_stricmp(g.argv[2], "cmdnotify")==0 ){
    rc = Th_CommandNotify(g.argv[3], (char)atoi(g.argv[4]));
  }else if( fossil_stricmp(g.argv[2], "webhook")==0 ){
    rc = Th_WebpageHook(g.argv[3], (char)atoi(g.argv[4]));
  }else if( fossil_stricmp(g.argv[2], "webnotify")==0 ){
    rc = Th_WebpageNotify(g.argv[3], (char)atoi(g.argv[4]));
  }else{
    fossil_fatal("Unknown TH1 hook %s\n", g.argv[2]);
  }

  zResult = (char*)Th_GetResult(g.interp, &nResult);

  sendText("RESULT (", -1, 0);
  sendText(Th_ReturnCodeName(rc, 0), -1, 0);
  sendText("): ", -1, 0);


  sendText(zResult, nResult, 0);

  sendText("\n", -1, 0);
  Th_PrintTraceLog();
  if( forceCgi ) cgi_reply();
}
#endif






/*
** COMMAND: test-th-eval
**
** Usage: %fossil test-th-eval SCRIPT
**
** Evaluate SCRIPT as if it were a header or footer or ticket rendering
** script and show the results on standard output.
**
** Options:
**
**     --cgi                Include a CGI response header in the output
**     --http               Include an HTTP response header in the output
**     --open-config        Open the configuration database
**     --th-trace           Trace TH1 execution (for debugging purposes)
*/
void test_th_eval(void){
  int rc;
  const char *zRc;
  int forceCgi, fullHttpReply;
  Th_InitTraceLog();
  forceCgi = find_option("cgi", 0, 0)!=0;
  fullHttpReply = find_option("http", 0, 0)!=0;
  if( fullHttpReply ) forceCgi = 1;
  if( forceCgi ) Th_ForceCgi(fullHttpReply);
  if( find_option("open-config", 0, 0)!=0 ){
    Th_OpenConfig(1);
  }
  verify_all_options();
  if( g.argc!=3 ){
    usage("script");
  }
  Th_FossilInit(TH_INIT_DEFAULT);
  rc = Th_Eval(g.interp, 0, g.argv[2], -1);
  zRc = Th_ReturnCodeName(rc, 1);
  fossil_print("%s%s%s\n", zRc, zRc ? ": " : "", Th_GetResult(g.interp, 0));
  Th_PrintTraceLog();
  if( forceCgi ) cgi_reply();
}

/*
** COMMAND: test-th-source
**
** Usage: %fossil test-th-source FILE
**
** Evaluate the contents of the file named "FILE" as if it were a header
** or footer or ticket rendering script and show the results on standard
** output.
**
** Options:
**
**     --cgi                Include a CGI response header in the output
**     --http               Include an HTTP response header in the output
**     --open-config        Open the configuration database
**     --th-trace           Trace TH1 execution (for debugging purposes)
*/
void test_th_source(void){
  int rc;
  const char *zRc;
  int forceCgi, fullHttpReply;
  Blob in;
  Th_InitTraceLog();
  forceCgi = find_option("cgi", 0, 0)!=0;
  fullHttpReply = find_option("http", 0, 0)!=0;
  if( fullHttpReply ) forceCgi = 1;
  if( forceCgi ) Th_ForceCgi(fullHttpReply);
  if( find_option("open-config", 0, 0)!=0 ){
    Th_OpenConfig(1);
  }
  verify_all_options();
  if( g.argc!=3 ){
    usage("file");
  }
  blob_zero(&in);
  blob_read_from_file(&in, g.argv[2]);
  Th_FossilInit(TH_INIT_DEFAULT);
  rc = Th_Eval(g.interp, 0, blob_str(&in), -1);
  zRc = Th_ReturnCodeName(rc, 1);
  fossil_print("%s%s%s\n", zRc, zRc ? ": " : "", Th_GetResult(g.interp, 0));
  Th_PrintTraceLog();
  if( forceCgi ) cgi_reply();
}

#ifdef FOSSIL_ENABLE_TH1_HOOKS
/*
** COMMAND: test-th-hook
**
** Usage: %fossil test-th-hook TYPE NAME FLAGS
**
** Evaluates the TH1 script configured for the pre-operation (i.e. a command
** or web page) "hook" or post-operation "notification".  The results of the
** script evaluation, if any, will be printed to the standard output channel.
** The NAME argument must be the name of a command or web page; however, it
** does not necessarily have to be a command or web page that is normally
** recognized by Fossil.  The FLAGS argument will be used to set the value
** of the "cmd_flags" and/or "web_flags" TH1 variables, if applicable.  The
** TYPE argument must be one of the following:
**
**     cmdhook              Executes the TH1 procedure [command_hook], after
**                          setting the TH1 variables "cmd_name", "cmd_args",
**                          and "cmd_flags" to appropriate values.
**
**     cmdnotify            Executes the TH1 procedure [command_notify], after
**                          setting the TH1 variables "cmd_name", "cmd_args",
**                          and "cmd_flags" to appropriate values.
**
**     webhook              Executes the TH1 procedure [webpage_hook], after
**                          setting the TH1 variables "web_name", "web_args",
**                          and "web_flags" to appropriate values.
**
**     webnotify            Executes the TH1 procedure [webpage_notify], after
**                          setting the TH1 variables "web_name", "web_args",
**                          and "web_flags" to appropriate values.
**
** Options:
**
**     --cgi                Include a CGI response header in the output
**     --http               Include an HTTP response header in the output
**     --th-trace           Trace TH1 execution (for debugging purposes)
*/
void test_th_hook(void){
  int rc = TH_OK;
  int nResult = 0;
  char *zResult = 0;
  int forceCgi, fullHttpReply;
  Th_InitTraceLog();
  forceCgi = find_option("cgi", 0, 0)!=0;
  fullHttpReply = find_option("http", 0, 0)!=0;
  if( fullHttpReply ) forceCgi = 1;
  if( forceCgi ) Th_ForceCgi(fullHttpReply);
  verify_all_options();
  if( g.argc<5 ){
    usage("TYPE NAME FLAGS");
  }
  if( fossil_stricmp(g.argv[2], "cmdhook")==0 ){
    rc = Th_CommandHook(g.argv[3], (char)atoi(g.argv[4]));
  }else if( fossil_stricmp(g.argv[2], "cmdnotify")==0 ){
    rc = Th_CommandNotify(g.argv[3], (char)atoi(g.argv[4]));
  }else if( fossil_stricmp(g.argv[2], "webhook")==0 ){
    rc = Th_WebpageHook(g.argv[3], (char)atoi(g.argv[4]));
  }else if( fossil_stricmp(g.argv[2], "webnotify")==0 ){
    rc = Th_WebpageNotify(g.argv[3], (char)atoi(g.argv[4]));
  }else{
    fossil_fatal("Unknown TH1 hook %s\n", g.argv[2]);
  }
  if( g.interp ){
    zResult = (char*)Th_GetResult(g.interp, &nResult);
  }
  sendText("RESULT (", -1, 0);
  sendText(Th_ReturnCodeName(rc, 0), -1, 0);
  sendText(")", -1, 0);
  if( zResult && nResult>0 ){
    sendText(": ", -1, 0);
    sendText(zResult, nResult, 0);
  }
  sendText("\n", -1, 0);
  Th_PrintTraceLog();
  if( forceCgi ) cgi_reply();
}
#endif
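
As a concrete illustration of the usage described above (available when Fossil is built with TH1 hooks enabled; the NAME and FLAGS values here are arbitrary placeholders):

    fossil test-th-hook cmdhook update 0

This invokes the TH1 [command_hook] procedure after setting cmd_name, cmd_args, and cmd_flags as described above, and then prints a line such as "RESULT (TH_OK)", optionally followed by the script's result text.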
Changes to src/th_tcl.c.

Old file, lines 22-35:
#ifdef FOSSIL_ENABLE_TCL

#include "sqlite3.h"
#include "th.h"
#include "tcl.h"









/*
** These macros are designed to reduce the redundant code required to marshal
** arguments from TH1 to Tcl.
*/
#define USE_ARGV_TO_OBJV() \
  int objc;                \
  Tcl_Obj **objv;          \






New file, lines 22-43:
#ifdef FOSSIL_ENABLE_TCL

#include "sqlite3.h"
#include "th.h"
#include "tcl.h"

/*
** This macro is used to verify that the header version of Tcl meets some
** minimum requirement.
*/
#define MINIMUM_TCL_VERSION(major, minor) \
  ((TCL_MAJOR_VERSION > (major)) || \
   ((TCL_MAJOR_VERSION == (major)) && (TCL_MINOR_VERSION >= (minor))))

/*
** These macros are designed to reduce the redundant code required to marshal
** arguments from TH1 to Tcl.
*/
#define USE_ARGV_TO_OBJV() \
  int objc;                \
  Tcl_Obj **objv;          \

Old file, lines 283-303:
/*
** Is the loaded version of Tcl one where TIP #285 (asynchronous script
** cancellation) is available?  This should return non-zero only for Tcl
** 8.6 and higher.
*/
static int canUseTip285(){

  int major = -1, minor = -1, patchLevel = -1, type = -1;

  Tcl_GetVersion(&major, &minor, &patchLevel, &type);
  if( major<0 || minor<0 || patchLevel<0 || type<0 ){
    return 0; /* NOTE: Invalid version info, assume bad. */
  }
  return (major>8 || (major==8 && minor>=6));



}

/*
** Creates and initializes a Tcl interpreter for use with the specified TH1
** interpreter.  Stores the created Tcl interpreter in the Tcl context supplied
** by the caller.  This must be declared here because quite a few functions in
** this file need to use it before it can be defined.






New file, lines 291-315:
/*
** Is the loaded version of Tcl one where TIP #285 (asynchronous script
** cancellation) is available?  This should return non-zero only for Tcl
** 8.6 and higher.
*/
static int canUseTip285(){
#if MINIMUM_TCL_VERSION(8, 6)
  int major = -1, minor = -1, patchLevel = -1, type = -1;

  Tcl_GetVersion(&major, &minor, &patchLevel, &type);
  if( major<0 || minor<0 || patchLevel<0 || type<0 ){
    return 0; /* NOTE: Invalid version info, assume bad. */
  }
  return (major>8 || (major==8 && minor>=6));
#else
  return 0;
#endif
}

/*
** Creates and initializes a Tcl interpreter for use with the specified TH1
** interpreter.  Stores the created Tcl interpreter in the Tcl context supplied
** by the caller.  This must be declared here because quite a few functions in
** this file need to use it before it can be defined.
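
The new canUseTip285() above combines a compile-time check (the MINIMUM_TCL_VERSION macro added in the previous hunk) with a run-time Tcl_GetVersion() check. A minimal sketch of how the macro behaves, for illustration only and not part of the check-in:

    #include "tcl.h"

    #define MINIMUM_TCL_VERSION(major, minor) \
      ((TCL_MAJOR_VERSION > (major)) || \
       ((TCL_MAJOR_VERSION == (major)) && (TCL_MINOR_VERSION >= (minor))))

    #if MINIMUM_TCL_VERSION(8, 6)
      /* Built against Tcl 8.6 or newer headers: the Tcl_Canceled() code
      ** may be compiled in; canUseTip285() still verifies the version
      ** that is actually loaded at run time. */
    #else
      /* Older headers (for example Tcl 8.5): the TIP #285 path is
      ** compiled out entirely. */
    #endif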

Old file, lines 1062-1078:
  }
  if( !bWait ) flags |= TCL_DONT_WAIT;
  Tcl_Preserve((ClientData)tclInterp);
  while( Tcl_DoOneEvent(flags) ){
    if( Tcl_InterpDeleted(tclInterp) ){
      break;
    }

    if( useTip285 && Tcl_Canceled(tclInterp, 0)!=TCL_OK ){
      break;
    }

  }
  Tcl_Release((ClientData)tclInterp);
  return rc;
}

/*
** Creates and initializes a Tcl interpreter for use with the specified TH1






New file, lines 1074-1092:
  }
  if( !bWait ) flags |= TCL_DONT_WAIT;
  Tcl_Preserve((ClientData)tclInterp);
  while( Tcl_DoOneEvent(flags) ){
    if( Tcl_InterpDeleted(tclInterp) ){
      break;
    }
#if MINIMUM_TCL_VERSION(8, 6)
    if( useTip285 && Tcl_Canceled(tclInterp, 0)!=TCL_OK ){
      break;
    }
#endif
  }
  Tcl_Release((ClientData)tclInterp);
  return rc;
}

/*
** Creates and initializes a Tcl interpreter for use with the specified TH1
Changes to src/timeline.c.

Old file, lines 91-104:
#define TIMELINE_DISJOINT 0x0010  /* Elements are not contiguous */
#define TIMELINE_FCHANGES 0x0020  /* Detail file changes */
#define TIMELINE_BRCOLOR  0x0040  /* Background color by branch name */
#define TIMELINE_UCOLOR   0x0080  /* Background color by user */
#define TIMELINE_FRENAMES 0x0100  /* Detail only file name changes */
#define TIMELINE_UNHIDE   0x0200  /* Unhide check-ins with "hidden" tag */
#define TIMELINE_SHOWRID  0x0400  /* Show RID values in addition to UUIDs */

#endif

/*
** Hash a string and use the hash to determine a background color.
*/
char *hash_color(const char *z){
  int i;                       /* Loop counter */






New file, lines 91-105:
#define TIMELINE_DISJOINT 0x0010  /* Elements are not contiguous */
#define TIMELINE_FCHANGES 0x0020  /* Detail file changes */
#define TIMELINE_BRCOLOR  0x0040  /* Background color by branch name */
#define TIMELINE_UCOLOR   0x0080  /* Background color by user */
#define TIMELINE_FRENAMES 0x0100  /* Detail only file name changes */
#define TIMELINE_UNHIDE   0x0200  /* Unhide check-ins with "hidden" tag */
#define TIMELINE_SHOWRID  0x0400  /* Show RID values in addition to UUIDs */
#define TIMELINE_BISECT   0x0800  /* Show supplemental bisect information */
#endif

/*
** Hash a string and use the hash to determine a background color.
*/
char *hash_color(const char *z){
  int i;                       /* Loop counter */

Old file, lines 227-248:
  int prevWasDivider = 0;     /* True if previous output row was <hr> */
  int fchngQueryInit = 0;     /* True if fchngQuery is initialized */
  Stmt fchngQuery;            /* Query for file changes on check-ins */
  static Stmt qbranch;
  int pendingEndTr = 0;       /* True if a </td></tr> is needed */
  int vid = 0;                /* Current checkout version */
  int dateFormat = 0;         /* 0: HH:MM (default) */

  const char *zDateFmt;

  if( fossil_strcmp(g.zIpAddr, "127.0.0.1")==0 && db_open_local(0) ){
    vid = db_lget_int("checkout", 0);
  }
  zPrevDate[0] = 0;
  mxWikiLen = db_get_int("timeline-max-comment", 0);
  dateFormat = db_get_int("timeline-date-format", 0);

  zDateFmt = P("datefmt");
  if( zDateFmt ) dateFormat = atoi(zDateFmt);
  if( tmFlags & TIMELINE_GRAPH ){
    pGraph = graph_init();
  }
  db_static_prepare(&qbranch,
    "SELECT value FROM tagxref WHERE tagid=%d AND tagtype>0 AND rid=:rid",






New file, lines 228-251:
  int prevWasDivider = 0;     /* True if previous output row was <hr> */
  int fchngQueryInit = 0;     /* True if fchngQuery is initialized */
  Stmt fchngQuery;            /* Query for file changes on check-ins */
  static Stmt qbranch;
  int pendingEndTr = 0;       /* True if a </td></tr> is needed */
  int vid = 0;                /* Current checkout version */
  int dateFormat = 0;         /* 0: HH:MM (default) */
  int bCommentGitStyle = 0;   /* Only show comments through first blank line */
  const char *zDateFmt;

  if( fossil_strcmp(g.zIpAddr, "127.0.0.1")==0 && db_open_local(0) ){
    vid = db_lget_int("checkout", 0);
  }
  zPrevDate[0] = 0;
  mxWikiLen = db_get_int("timeline-max-comment", 0);
  dateFormat = db_get_int("timeline-date-format", 0);
  bCommentGitStyle = db_get_int("timeline-truncate-at-blank", 0);
  zDateFmt = P("datefmt");
  if( zDateFmt ) dateFormat = atoi(zDateFmt);
  if( tmFlags & TIMELINE_GRAPH ){
    pGraph = graph_init();
  }
  db_static_prepare(&qbranch,
    "SELECT value FROM tagxref WHERE tagid=%d AND tagtype>0 AND rid=:rid",

Old file, lines 398-411:
    if( pGraph && zType[0]!='c' ){
      @ &bull;
    }
    if( modPending ){
      @ <span class="modpending">(Awaiting Moderator Approval)</span>
    }
    if( zType[0]=='c' ){










      hyperlink_to_uuid(zUuid);
      if( isLeaf ){
        if( db_exists("SELECT 1 FROM tagxref"
                      " WHERE rid=%d AND tagid=%d AND tagtype>0",
                      rid, TAG_CLOSED) ){
          @ <span class="timelineLeaf">Closed-Leaf:</span>
        }else{






New file, lines 401-424:
    if( pGraph && zType[0]!='c' ){
      @ &bull;
    }
    if( modPending ){
      @ <span class="modpending">(Awaiting Moderator Approval)</span>
    }
    if( zType[0]=='c' ){
      if( tmFlags & TIMELINE_BISECT ){
        static Stmt bisectQuery;
        db_prepare(&bisectQuery, "SELECT seq, stat FROM bilog WHERE rid=:rid");
        db_bind_int(&bisectQuery, ":rid", rid);
        if( db_step(&bisectQuery)==SQLITE_ROW ){
          @ <b>%s(db_column_text(&bisectQuery,1))</b>
          @ (%d(db_column_int(&bisectQuery,0)))
        }
        db_reset(&bisectQuery);
      }
      hyperlink_to_uuid(zUuid);
      if( isLeaf ){
        if( db_exists("SELECT 1 FROM tagxref"
                      " WHERE rid=%d AND tagid=%d AND tagtype>0",
                      rid, TAG_CLOSED) ){
          @ <span class="timelineLeaf">Closed-Leaf:</span>
        }else{

Old file, lines 421-434:
      @ (%d(rid))
    }
    db_column_blob(pQuery, commentColumn, &comment);
    if( zType[0]!='c' ){
      /* Comments for anything other than a check-in are generated by
      ** "fossil rebuild" and expect to be rendered as text/x-fossil-wiki */
      wiki_convert(&comment, 0, WIKI_INLINE);













    }else if( mxWikiLen>0 && blob_size(&comment)>mxWikiLen ){
      Blob truncated;
      blob_zero(&truncated);
      blob_append(&truncated, blob_buffer(&comment), mxWikiLen);
      blob_append(&truncated, "...", 3);
      @ <span class="timelineComment">%W(blob_str(&truncated))</span>
      blob_reset(&truncated);






New file, lines 434-460:
      @ (%d(rid))
    }
    db_column_blob(pQuery, commentColumn, &comment);
    if( zType[0]!='c' ){
      /* Comments for anything other than a check-in are generated by
      ** "fossil rebuild" and expect to be rendered as text/x-fossil-wiki */
      wiki_convert(&comment, 0, WIKI_INLINE);
    }else if( bCommentGitStyle ){
      /* Truncate comment at first blank line */
      int ii, jj;
      int n = blob_size(&comment);
      char *z = blob_str(&comment);
      for(ii=0; ii<n; ii++){
        if( z[ii]=='\n' ){
          for(jj=ii+1; jj<n && z[jj]!='\n' && fossil_isspace(z[jj]); jj++){}
          if( z[jj]=='\n' ) break;
        }
      }
      z[ii] = 0;
      @ <span class="timelineComment">%W(z)</span>
    }else if( mxWikiLen>0 && blob_size(&comment)>mxWikiLen ){
      Blob truncated;
      blob_zero(&truncated);
      blob_append(&truncated, blob_buffer(&comment), mxWikiLen);
      blob_append(&truncated, "...", 3);
      @ <span class="timelineComment">%W(blob_str(&truncated))</span>
      blob_reset(&truncated);
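
The bCommentGitStyle branch above looks for the first blank line in a check-in comment (a line that is empty or contains only whitespace) and truncates the comment there, similar to how git shows only the subject line. A self-contained sketch of the same scan on a plain C string, for illustration only (it avoids Fossil's Blob and fossil_isspace helpers):

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* Truncate z at the first blank line, mirroring the loop above. */
    static void truncate_at_blank_line(char *z){
      int ii, jj;
      int n = (int)strlen(z);
      for(ii=0; ii<n; ii++){
        if( z[ii]=='\n' ){
          /* Skip spaces/tabs on the following line; a second newline
          ** means that line is blank, so the comment ends at ii. */
          for(jj=ii+1; jj<n && z[jj]!='\n' && isspace((unsigned char)z[jj]); jj++){}
          if( z[jj]=='\n' ) break;
        }
      }
      z[ii] = 0;
    }

    int main(void){
      char zComment[] = "Fix the parser.\n  \nAlso reformat the whole tree.";
      truncate_at_blank_line(zComment);
      printf("%s\n", zComment);   /* prints "Fix the parser." */
      return 0;
    }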

Old file, lines 726-740:
          int mi = i;
          if( pRow->mergeDown & (1<<i) ) mi = -mi;
          cgi_printf("%c%d", cSep, mi);
          cSep = ',';
        }
      }
      if( cSep=='[' ) cgi_printf("[");
      cgi_printf("],h:\"%s\"}%s", pRow->zUuid, pRow->pNext ? ",\n" : "];\n");
    }
    cgi_printf("var nrail = %d\n", pGraph->mxRail+1);
    graph_free(pGraph);
    @ var canvasDiv;
    @ var railPitch;
    @ var mergeOffset;
    @ var node, arrow, arrowSmall, line, mArrow, mLine, wArrow, wLine;






New file, lines 752-766:
          int mi = i;
          if( pRow->mergeDown & (1<<i) ) mi = -mi;
          cgi_printf("%c%d", cSep, mi);
          cSep = ',';
        }
      }
      if( cSep=='[' ) cgi_printf("[");
      cgi_printf("],h:\"%!S\"}%s", pRow->zUuid, pRow->pNext ? ",\n" : "];\n");
    }
    cgi_printf("var nrail = %d\n", pGraph->mxRail+1);
    graph_free(pGraph);
    @ var canvasDiv;
    @ var railPitch;
    @ var mergeOffset;
    @ var node, arrow, arrowSmall, line, mArrow, mLine, wArrow, wLine;

Old file, lines 1014-1050:
}

/*
** Return a pointer to a constant string that forms the basis
** for a timeline query for the WWW interface.
*/
const char *timeline_query_for_www(void){
  static const char *zBase = 0;
  static const char zBaseSql[] =
    @ SELECT
    @   blob.rid AS blobRid,
    @   uuid AS uuid,
    @   datetime(event.mtime%s) AS timestamp,
    @   coalesce(ecomment, comment) AS comment,
    @   coalesce(euser, user) AS user,
    @   blob.rid IN leaf AS leaf,
    @   bgcolor AS bgColor,
    @   event.type AS eventType,
    @   (SELECT group_concat(substr(tagname,5), ', ') FROM tag, tagxref
    @     WHERE tagname GLOB 'sym-*' AND tag.tagid=tagxref.tagid
    @       AND tagxref.rid=blob.rid AND tagxref.tagtype>0) AS tags,
    @   tagid AS tagid,
    @   brief AS brief,
    @   event.mtime AS mtime
    @  FROM event CROSS JOIN blob
    @ WHERE blob.rid=event.objid
  ;
  if( zBase==0 ){
    zBase = mprintf(zBaseSql /*works-like: "%s"*/, timeline_utc());
  }
  return zBase;
}

/*
** Generate a submenu element with a single parameter change.
*/
static void timeline_submenu(






New file, lines 1040-1072:
}

/*
** Return a pointer to a constant string that forms the basis
** for a timeline query for the WWW interface.
*/
const char *timeline_query_for_www(void){
  static const char zBase[] =

    @ SELECT
    @   blob.rid AS blobRid,
    @   uuid AS uuid,
    @   datetime(event.mtime,toLocal()) AS timestamp,
    @   coalesce(ecomment, comment) AS comment,
    @   coalesce(euser, user) AS user,
    @   blob.rid IN leaf AS leaf,
    @   bgcolor AS bgColor,
    @   event.type AS eventType,
    @   (SELECT group_concat(substr(tagname,5), ', ') FROM tag, tagxref
    @     WHERE tagname GLOB 'sym-*' AND tag.tagid=tagxref.tagid
    @       AND tagxref.rid=blob.rid AND tagxref.tagtype>0) AS tags,
    @   tagid AS tagid,
    @   brief AS brief,
    @   event.mtime AS mtime
    @  FROM event CROSS JOIN blob
    @ WHERE blob.rid=event.objid
  ;



  return zBase;
}

/*
** Generate a submenu element with a single parameter change.
*/
static void timeline_submenu(

Old file, lines 1064-1078:
** query parameters of timeline into a julianday mtime value.
*/
double symbolic_name_to_mtime(const char *z){
  double mtime;
  int rid;
  if( z==0 ) return -1.0;
  if( fossil_isdate(z) ){
    mtime = db_double(0.0, "SELECT julianday(%Q,'utc')", z);
    if( mtime>0.0 ) return mtime;
  }
  rid = symbolic_name_to_rid(z, "*");
  if( rid ){
    mtime = db_double(0.0, "SELECT mtime FROM event WHERE objid=%d", rid);
  }else{
    mtime = db_double(-1.0,






New file, lines 1086-1100:
** query parameters of timeline into a julianday mtime value.
*/
double symbolic_name_to_mtime(const char *z){
  double mtime;
  int rid;
  if( z==0 ) return -1.0;
  if( fossil_isdate(z) ){
    mtime = db_double(0.0, "SELECT julianday(%Q,fromLocal())", z);
    if( mtime>0.0 ) return mtime;
  }
  rid = symbolic_name_to_rid(z, "*");
  if( rid ){
    mtime = db_double(0.0, "SELECT mtime FROM event WHERE objid=%d", rid);
  }else{
    mtime = db_double(-1.0,

Old file, lines 1190-1205:
**    to=UUID          ... to this
**    shortest         ... show only the shortest path
**    uf=FUUID       Show only check-ins that use given file version
**    brbg           Background color from branch name
**    ubg            Background color from user
**    namechng       Show only check-ins that have filename changes
**    forks          Show only forks and their children
**    ym=YYYY-MM     Shown only events for the given year/month.

**    datefmt=N      Override the date format

**
** p= and d= can appear individually or together.  If either p= or d=
** appear, then u=, y=, a=, and b= are ignored.
**
** If both a= and b= appear then both upper and lower bounds are honored.
**
** If n= is missing, the default count is 50 for most queries but






New file, lines 1212-1229:
**    to=UUID          ... to this
**    shortest         ... show only the shortest path
**    uf=FUUID       Show only check-ins that use given file version
**    brbg           Background color from branch name
**    ubg            Background color from user
**    namechng       Show only check-ins that have filename changes
**    forks          Show only forks and their children
**    ym=YYYY-MM     Show only events for the given year/month.
**    ymd=YYYY-MM-DD Show only events on the given day
**    datefmt=N      Override the date format
**    bisect         Show the check-ins that are in the current bisect    
**
** p= and d= can appear individually or together.  If either p= or d=
** appear, then u=, y=, a=, and b= are ignored.
**
** If both a= and b= appear then both upper and lower bounds are honored.
**
** If n= is missing, the default count is 50 for most queries but

Old file, lines 1221-1237:
  const char *zMark = P("m");        /* Mark this event or an event this time */
  const char *zTagName = P("t");     /* Show events with this tag */
  const char *zBrName = P("r");      /* Show events related to this tag */
  const char *zSearch = P("s");      /* Search string */
  const char *zUses = P("uf");       /* Only show check-ins hold this file */
  const char *zYearMonth = P("ym");  /* Show check-ins for the given YYYY-MM */
  const char *zYearWeek = P("yw");   /* Check-ins for YYYY-WW (week-of-year) */

  int useDividers = P("nd")==0;      /* Show dividers if "nd" is missing */
  int renameOnly = P("namechng")!=0; /* Show only check-ins that rename files */
  int forkOnly = PB("forks");        /* Show only forks and their children */

  int tagid;                         /* Tag ID */
  int tmFlags = 0;                   /* Timeline flags */
  const char *zThisTag = 0;          /* Suppress links to this tag */
  const char *zThisUser = 0;         /* Suppress links to this user */
  HQuery url;                        /* URL for various branch links */
  int from_rid = name_to_typed_rid(P("from"),"ci"); /* from= for paths */
  int to_rid = name_to_typed_rid(P("to"),"ci");    /* to= for path timelines */






New file, lines 1245-1263:
  const char *zMark = P("m");        /* Mark this event or an event this time */
  const char *zTagName = P("t");     /* Show events with this tag */
  const char *zBrName = P("r");      /* Show events related to this tag */
  const char *zSearch = P("s");      /* Search string */
  const char *zUses = P("uf");       /* Only show check-ins hold this file */
  const char *zYearMonth = P("ym");  /* Show check-ins for the given YYYY-MM */
  const char *zYearWeek = P("yw");   /* Check-ins for YYYY-WW (week-of-year) */
  const char *zDay = P("ymd");       /* Check-ins for the day YYYY-MM-DD */
  int useDividers = P("nd")==0;      /* Show dividers if "nd" is missing */
  int renameOnly = P("namechng")!=0; /* Show only check-ins that rename files */
  int forkOnly = PB("forks");        /* Show only forks and their children */
  int bisectOnly = PB("bisect");     /* Show the check-ins of the bisect */
  int tagid;                         /* Tag ID */
  int tmFlags = 0;                   /* Timeline flags */
  const char *zThisTag = 0;          /* Suppress links to this tag */
  const char *zThisUser = 0;         /* Suppress links to this user */
  HQuery url;                        /* URL for various branch links */
  int from_rid = name_to_typed_rid(P("from"),"ci"); /* from= for paths */
  int to_rid = name_to_typed_rid(P("to"),"ci");    /* to= for path timelines */

Old file, lines 1268-1293:
  /* To view the timeline, must have permission to read project data.
  */
  pd_rid = name_to_typed_rid(P("dp"),"ci");
  if( pd_rid ){
    p_rid = d_rid = pd_rid;
  }
  login_check_credentials();
  if( !g.perm.Read && !g.perm.RdTkt && !g.perm.RdWiki ){


    login_needed(g.anon.Read && g.anon.RdTkt && g.anon.RdWiki);
    return;
  }
  url_initialize(&url, "timeline");
  cgi_query_parameters_to_url(&url);
  if( zTagName && g.perm.Read ){
    tagid = db_int(-1,"SELECT tagid FROM tag WHERE tagname='sym-%q'",zTagName);
    zThisTag = zTagName;


  }else if( zBrName && g.perm.Read ){
    tagid = db_int(-1,"SELECT tagid FROM tag WHERE tagname='sym-%q'",zBrName);
    zThisTag = zBrName;


  }else{
    tagid = 0;
  }
  if( zMark && zMark[0]==0 ){
    if( zAfter ) zMark = zAfter;
    if( zBefore ) zMark = zBefore;
    if( zCirca ) zMark = zCirca;






New file, lines 1294-1325:
  /* To view the timeline, must have permission to read project data.
  */
  pd_rid = name_to_typed_rid(P("dp"),"ci");
  if( pd_rid ){
    p_rid = d_rid = pd_rid;
  }
  login_check_credentials();
  if( (!g.perm.Read && !g.perm.RdTkt && !g.perm.RdWiki)
   || (bisectOnly && !g.perm.Setup)
  ){
    login_needed(g.anon.Read && g.anon.RdTkt && g.anon.RdWiki);
    return;
  }
  url_initialize(&url, "timeline");
  cgi_query_parameters_to_url(&url);
  if( zTagName && g.perm.Read ){
    tagid = db_int(-1,"SELECT tagid FROM tag WHERE tagname='sym-%q'",zTagName);
    zThisTag = zTagName;
    style_submenu_element("Related", "Related", "%s",
                          url_render(&url, "r", zTagName, "t", 0));
  }else if( zBrName && g.perm.Read ){
    tagid = db_int(-1,"SELECT tagid FROM tag WHERE tagname='sym-%q'",zBrName);
    zThisTag = zBrName;
    style_submenu_element("Branch Only", "only", "%s",
                          url_render(&url, "t", zBrName, "r", 0));
  }else{
    tagid = 0;
  }
  if( zMark && zMark[0]==0 ){
    if( zAfter ) zMark = zAfter;
    if( zBefore ) zMark = zBefore;
    if( zCirca ) zMark = zCirca;

Old file, lines 1351-1364:
      "     AND pid IN rnfork;",
      TAG_BRANCH, TAG_BRANCH, TAG_BRANCH, TAG_BRANCH
    );
    tmFlags |= TIMELINE_UNHIDE;
    zType = "ci";
    disableY = 1;
  }













  style_header("Timeline");
  login_anonymous_available();
  timeline_temp_table();
  blob_zero(&sql);
  blob_zero(&desc);
  blob_append(&sql, "INSERT OR IGNORE INTO timeline ", -1);






New file, lines 1383-1408:
      "     AND pid IN rnfork;",
      TAG_BRANCH, TAG_BRANCH, TAG_BRANCH, TAG_BRANCH
    );
    tmFlags |= TIMELINE_UNHIDE;
    zType = "ci";
    disableY = 1;
  }
  if( bisectOnly
   && fossil_strcmp(g.zIpAddr,"127.0.0.1")==0
   && db_open_local(0)
  ){
    int iCurrent = db_lget_int("checkout",0);
    bisect_create_bilog_table(iCurrent);
    tmFlags |= TIMELINE_UNHIDE | TIMELINE_BISECT;
    zType = "ci";
    disableY = 1;
  }else{
    bisectOnly = 0;
  }

  style_header("Timeline");
  login_anonymous_available();
  timeline_temp_table();
  blob_zero(&sql);
  blob_zero(&desc);
  blob_append(&sql, "INSERT OR IGNORE INTO timeline ", -1);

Old file, lines 1483-1504:
    }
    if( renameOnly ){
      blob_append_sql(&sql, " AND event.objid IN rnfile ");
    }
    if( forkOnly ){
      blob_append_sql(&sql, " AND event.objid IN rnfork ");
    }



    if( zYearMonth ){
      blob_append_sql(&sql, " AND %Q=strftime('%%Y-%%m',event.mtime) ",
                   zYearMonth);
    }
    else if( zYearWeek ){
      blob_append_sql(&sql, " AND %Q=strftime('%%Y-%%W',event.mtime) ",
                   zYearWeek);
    }




    if( tagid ){
      blob_append_sql(&sql,
        " AND (EXISTS(SELECT 1 FROM tagxref"
            " WHERE tagid=%d AND tagtype>0 AND rid=blob.rid)\n", tagid);

      if( zBrName ){
        /* The next two blob_appendf() calls add SQL that causes check-ins that






New file, lines 1527-1555:
    }
    if( renameOnly ){
      blob_append_sql(&sql, " AND event.objid IN rnfile ");
    }
    if( forkOnly ){
      blob_append_sql(&sql, " AND event.objid IN rnfork ");
    }
    if( bisectOnly ){
      blob_append_sql(&sql, " AND event.objid IN (SELECT rid FROM bilog) ");
    }
    if( zYearMonth ){
      blob_append_sql(&sql, " AND %Q=strftime('%%Y-%%m',event.mtime) ",
                   zYearMonth);
    }
    else if( zYearWeek ){
      blob_append_sql(&sql, " AND %Q=strftime('%%Y-%%W',event.mtime) ",
                   zYearWeek);
    }
    else if( zDay ){
      blob_append_sql(&sql, " AND %Q=strftime('%%Y-%%m-%%d',event.mtime) ",
                   zDay);
    }
    if( tagid ){
      blob_append_sql(&sql,
        " AND (EXISTS(SELECT 1 FROM tagxref"
            " WHERE tagid=%d AND tagtype>0 AND rid=blob.rid)\n", tagid);

      if( zBrName ){
        /* The next two blob_appendf() calls add SQL that causes check-ins that

Old file, lines 1640-1672:
    db_multi_exec("%s", blob_sql_text(&sql));

    n = db_int(0, "SELECT count(*) FROM timeline WHERE etype!='div' /*scan*/");
    if( zYearMonth ){
      blob_appendf(&desc, "%s events for %h", zEType, zYearMonth);
    }else if( zYearWeek ){
      blob_appendf(&desc, "%s events for year/week %h", zEType, zYearWeek);


    }else if( zBefore==0 && zCirca==0 && n>=nEntry && nEntry>0 ){
      blob_appendf(&desc, "%d most recent %ss", n, zEType);
    }else{
      blob_appendf(&desc, "%d %ss", n, zEType);
    }
    if( zUses ){
      char *zFilenames = names_of_file(zUses);
      blob_appendf(&desc, " using file %s version %z%S</a>", zFilenames,
                   href("%R/artifact/%!S",zUses), zUses);
      tmFlags |= TIMELINE_DISJOINT;
    }
    if( renameOnly ){
      blob_appendf(&desc, " that contain filename changes");
      tmFlags |= TIMELINE_DISJOINT|TIMELINE_FRENAMES;
    }
    if( forkOnly ){
      blob_appendf(&desc, " associated with forks");
      tmFlags |= TIMELINE_DISJOINT;
    }




    if( zUser ){
      blob_appendf(&desc, " by user %h", zUser);
      tmFlags |= TIMELINE_DISJOINT;
    }
    if( zTagName ){
      blob_appendf(&desc, " tagged with \"%h\"", zTagName);
      tmFlags |= TIMELINE_DISJOINT;






New file, lines 1691-1729:
    db_multi_exec("%s", blob_sql_text(&sql));

    n = db_int(0, "SELECT count(*) FROM timeline WHERE etype!='div' /*scan*/");
    if( zYearMonth ){
      blob_appendf(&desc, "%s events for %h", zEType, zYearMonth);
    }else if( zYearWeek ){
      blob_appendf(&desc, "%s events for year/week %h", zEType, zYearWeek);
    }else if( zDay ){
      blob_appendf(&desc, "%s events occurring on %h", zEType, zDay);
    }else if( zBefore==0 && zCirca==0 && n>=nEntry && nEntry>0 ){
      blob_appendf(&desc, "%d most recent %ss", n, zEType);
    }else{
      blob_appendf(&desc, "%d %ss", n, zEType);
    }
    if( zUses ){
      char *zFilenames = names_of_file(zUses);
      blob_appendf(&desc, " using file %s version %z%S</a>", zFilenames,
                   href("%R/artifact/%!S",zUses), zUses);
      tmFlags |= TIMELINE_DISJOINT;
    }
    if( renameOnly ){
      blob_appendf(&desc, " that contain filename changes");
      tmFlags |= TIMELINE_DISJOINT|TIMELINE_FRENAMES;
    }
    if( forkOnly ){
      blob_appendf(&desc, " associated with forks");
      tmFlags |= TIMELINE_DISJOINT;
    }
    if( bisectOnly ){
      blob_appendf(&desc, " in the most recent bisect");
      tmFlags |= TIMELINE_DISJOINT;
    }
    if( zUser ){
      blob_appendf(&desc, " by user %h", zUser);
      tmFlags |= TIMELINE_DISJOINT;
    }
    if( zTagName ){
      blob_appendf(&desc, " tagged with \"%h\"", zTagName);
      tmFlags |= TIMELINE_DISJOINT;

Old file, lines 1852-1866:
        db_prepare(&fchngQuery,
           "SELECT (pid<=0) AS isnew,"
           "       (fid==0) AS isdel,"
           "       (SELECT name FROM filename WHERE fnid=mlink.fnid) AS name,"
           "       (SELECT uuid FROM blob WHERE rid=fid),"
           "       (SELECT uuid FROM blob WHERE rid=pid)"
           "  FROM mlink"
           " WHERE mid=:mid AND pid!=fid"
           " ORDER BY 3 /*sort*/"
        );
        fchngQueryInit = 1;
      }
      db_bind_int(&fchngQuery, ":mid", rid);
      while( db_step(&fchngQuery)==SQLITE_ROW ){
        const char *zFilename = db_column_text(&fchngQuery, 2);






New file, lines 1909-1923:
        db_prepare(&fchngQuery,
           "SELECT (pid<=0) AS isnew,"
           "       (fid==0) AS isdel,"
           "       (SELECT name FROM filename WHERE fnid=mlink.fnid) AS name,"
           "       (SELECT uuid FROM blob WHERE rid=fid),"
           "       (SELECT uuid FROM blob WHERE rid=pid)"
           "  FROM mlink"
           " WHERE mid=:mid AND pid!=fid AND NOT mlink.isaux"
           " ORDER BY 3 /*sort*/"
        );
        fchngQueryInit = 1;
      }
      db_bind_int(&fchngQuery, ":mid", rid);
      while( db_step(&fchngQuery)==SQLITE_ROW ){
        const char *zFilename = db_column_text(&fchngQuery, 2);

Old file, lines 1895-1930:
** a timeline query for display on a TTY.
*/
const char *timeline_query_for_tty(void){
  static const char zBaseSql[] =
    @ SELECT
    @   blob.rid AS rid,
    @   uuid,
    @   datetime(event.mtime%s) AS mDateTime,
    @   coalesce(ecomment,comment)
    @     || ' (user: ' || coalesce(euser,user,'?')
    @     || (SELECT case when length(x)>0 then ' tags: ' || x else '' end
    @           FROM (SELECT group_concat(substr(tagname,5), ', ') AS x
    @                   FROM tag, tagxref
    @                  WHERE tagname GLOB 'sym-*' AND tag.tagid=tagxref.tagid
    @                    AND tagxref.rid=blob.rid AND tagxref.tagtype>0))
    @     || ')' as comment,
    @   (SELECT count(*) FROM plink WHERE pid=blob.rid AND isprim)
    @        AS primPlinkCount,
    @   (SELECT count(*) FROM plink WHERE cid=blob.rid) AS plinkCount,
    @   event.mtime AS mtime,
    @   tagxref.value AS branch
    @ FROM tag CROSS JOIN event CROSS JOIN blob
    @      LEFT JOIN tagxref ON tagxref.tagid=tag.tagid
    @   AND tagxref.tagtype>0
    @   AND tagxref.rid=blob.rid
    @ WHERE blob.rid=event.objid
    @   AND tag.tagname='branch'
  ;
  return mprintf(zBaseSql /*works-like: "%s"*/, timeline_utc());
}

/*
** Return true if the input string is a date in the ISO 8601 format:
** YYYY-MM-DD.
*/
static int isIsoDate(const char *z){






New file, lines 1952-1987:
** a timeline query for display on a TTY.
*/
const char *timeline_query_for_tty(void){
  static const char zBaseSql[] =
    @ SELECT
    @   blob.rid AS rid,
    @   uuid,
    @   datetime(event.mtime,toLocal()) AS mDateTime,
    @   coalesce(ecomment,comment)
    @     || ' (user: ' || coalesce(euser,user,'?')
    @     || (SELECT case when length(x)>0 then ' tags: ' || x else '' end
    @           FROM (SELECT group_concat(substr(tagname,5), ', ') AS x
    @                   FROM tag, tagxref
    @                  WHERE tagname GLOB 'sym-*' AND tag.tagid=tagxref.tagid
    @                    AND tagxref.rid=blob.rid AND tagxref.tagtype>0))
    @     || ')' as comment,
    @   (SELECT count(*) FROM plink WHERE pid=blob.rid AND isprim)
    @        AS primPlinkCount,
    @   (SELECT count(*) FROM plink WHERE cid=blob.rid) AS plinkCount,
    @   event.mtime AS mtime,
    @   tagxref.value AS branch
    @ FROM tag CROSS JOIN event CROSS JOIN blob
    @      LEFT JOIN tagxref ON tagxref.tagid=tag.tagid
    @   AND tagxref.tagtype>0
    @   AND tagxref.rid=blob.rid
    @ WHERE blob.rid=event.objid
    @   AND tag.tagname='branch'
  ;
  return zBaseSql;
}

/*
** Return true if the input string is a date in the ISO 8601 format:
** YYYY-MM-DD.
*/
static int isIsoDate(const char *z){
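
The hunk ends at the opening line of isIsoDate(); its body is outside the displayed context. Purely as an illustration of the documented contract (a string of the exact form YYYY-MM-DD), and not Fossil's actual implementation, such a check could be written as:

    #include <ctype.h>

    /* Return true if z has the exact form YYYY-MM-DD. */
    static int looks_like_iso_date(const char *z){
      int i;
      for(i=0; i<10; i++){
        if( i==4 || i==7 ){
          if( z[i]!='-' ) return 0;                      /* separators */
        }else{
          if( !isdigit((unsigned char)z[i]) ) return 0;  /* digits */
        }
      }
      return z[10]==0;                                   /* nothing may follow */
    }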

Old file, lines 2074-2088:
    const char *zShift = "";
    if( mode==3 || mode==4 ){
      fossil_fatal("cannot compute descendants or ancestors of a date");
    }
    if( mode==0 ){
      if( isIsoDate(zOrigin) ) zShift = ",'+1 day'";
    }
    zDate = mprintf("(SELECT julianday(%Q%s, 'utc'))", zOrigin, zShift);
  }

  if( zFilePattern ){
    if( zType==0 ){
      /* When zFilePattern is specified and type is not specified, only show
       * file check-ins */
      zType="ci";






New file, lines 2131-2145:
    const char *zShift = "";
    if( mode==3 || mode==4 ){
      fossil_fatal("cannot compute descendants or ancestors of a date");
    }
    if( mode==0 ){
      if( isIsoDate(zOrigin) ) zShift = ",'+1 day'";
    }
    zDate = mprintf("(SELECT julianday(%Q%s, fromLocal()))", zOrigin, zShift);
  }

  if( zFilePattern ){
    if( zType==0 ){
      /* When zFilePattern is specified and type is not specified, only show
       * file check-ins */
      zType="ci";

Old file, lines 2143-2178:
  }
  db_prepare(&q, "%s", blob_sql_text(&sql));
  blob_reset(&sql);
  print_timeline(&q, n, width, verboseFlag);
  db_finalize(&q);
}

/*
** Return one of two things:
**
**   ",'localtime'"  if the timeline-utc property is set to 0.
**
**   ""              (empty string) otherwise.
*/
const char *timeline_utc(){
  if( g.fTimeFormat==0 ){
    if( db_get_int("timeline-utc", 1) ){
      g.fTimeFormat = 1;
    }else{
      g.fTimeFormat = 2;
    }
  }
  if( g.fTimeFormat==1 ){
    return "";
  }else{
    return ",'localtime'";
  }
}


/*
** COMMAND: test-timewarp-list
**
** Usage: %fossil test-timewarp-list ?-v|---verbose?
**
** Display all instances of child check-ins that appear earlier in time






New file, lines 2200-2213:
  }
  db_prepare(&q, "%s", blob_sql_text(&sql));
  blob_reset(&sql);
  print_timeline(&q, n, width, verboseFlag);
  db_finalize(&q);
}
























/*
** COMMAND: test-timewarp-list
**
** Usage: %fossil test-timewarp-list ?-v|---verbose?
**
** Display all instances of child check-ins that appear earlier in time

Old file, lines 2206-2243:
         db_column_text(&q, 3));
    }
  }
  db_finalize(&q);
}

/*
** WEBPAGE: test_timewarps
**
** Show all check-ins that are "timewarps".  A timewarp is a
** check-in that occurs before its parent, according to the
** timestamp information on the check-in.  This can only actually
** happen, of course, if a user's system clock is set incorrectly.
*/
void test_timewarp_page(void){
  Stmt q;


  login_check_credentials();
  if( !g.perm.Read || !g.perm.Hyperlink ){
    login_needed(g.anon.Read && g.anon.Hyperlink);
    return;
  }
  style_header("Instances of timewarp");
  @ <ul>
  db_prepare(&q,
     "SELECT blob.uuid "



     "  FROM plink p, plink c, blob"
     " WHERE p.cid=c.pid  AND p.mtime>c.mtime"
     "   AND blob.rid=c.cid"



  );
  while( db_step(&q)==SQLITE_ROW ){
    const char *zUuid = db_column_text(&q, 0);














    @ <li>
    @ <a href="%R/timeline?dp=%!S(zUuid)&amp;unhide">%S(zUuid)</a>







  }
  db_finalize(&q);






  style_footer();
}






New file, lines 2241-2310:
         db_column_text(&q, 3));
    }
  }
  db_finalize(&q);
}

/*
** WEBPAGE: timewarps
**
** Show all check-ins that are "timewarps".  A timewarp is a
** check-in that occurs before its parent, according to the
** timestamp information on the check-in.  This can only actually
** happen, of course, if a user's system clock is set incorrectly.
*/
void test_timewarp_page(void){
  Stmt q;
  int cnt = 0;

  login_check_credentials();
  if( !g.perm.Read || !g.perm.Hyperlink ){
    login_needed(g.anon.Read && g.anon.Hyperlink);
    return;
  }
  style_header("Instances of timewarp");

  db_prepare(&q,
     "SELECT blob.uuid, "
     "       date(ce.mtime),"
     "       pe.mtime>ce.mtime,"
     "       coalesce(ce.euser,ce.user)"
     "  FROM plink p, plink c, blob, event pe, event ce"
     " WHERE p.cid=c.pid  AND p.mtime>c.mtime"
     "   AND blob.rid=c.cid"
     "   AND pe.objid=p.cid"
     "   AND ce.objid=c.cid"
     " ORDER BY 2 DESC"
  );
  while( db_step(&q)==SQLITE_ROW ){
    const char *zCkin = db_column_text(&q, 0);
    const char *zDate = db_column_text(&q, 1);
    const char *zStatus = db_column_int(&q,2) ? "Open"
                                 : "Resolved by editing date";
    const char *zUser = db_column_text(&q, 3);
    char *zHref = href("%R/timeline?c=%S", zCkin);
    if( cnt==0 ){
      @ <div class="brlist"><table id="timewarptable">
      @ <thead><tr>
      @ <th>Check-in</th>
      @ <th>Date</th>
      @ <th>User</th>
      @ <th>Status</th>
      @ </tr></thead><tbody>
    }
    @ <tr>

    @ <td>%s(zHref)%S(zCkin)</a></td>
    @ <td>%s(zHref)%s(zDate)</a></td>
    @ <td>%h(zUser)</td>
    @ <td>%s(zStatus)</td>
    @ </tr>
    fossil_free(zHref);
    cnt++;
  }
  db_finalize(&q);
  if( cnt==0 ){
    @ <p>No timewarps in this repository</p>
  }else{
    @ </tbody></table></div>
    output_table_sorting_javascript("timewarptable","tttt",2);
  }
  style_footer();
}
Changes to src/tkt.c.

Old file, lines 135-151:
*/
static void initializeVariablesFromDb(void){
  const char *zName;
  Stmt q;
  int i, n, size, j;

  zName = PD("name","-none-");
  db_prepare(&q, "SELECT datetime(tkt_mtime%s) AS tkt_datetime, *"
                 "  FROM ticket WHERE tkt_uuid GLOB '%q*'",
                 timeline_utc(), zName);
  if( db_step(&q)==SQLITE_ROW ){
    n = db_column_count(&q);
    for(i=0; i<n; i++){
      const char *zVal = db_column_text(&q, i);
      const char *zName = db_column_name(&q, i);
      char *zRevealed = 0;
      if( zVal==0 ){






New file, lines 135-151:
*/
static void initializeVariablesFromDb(void){
  const char *zName;
  Stmt q;
  int i, n, size, j;

  zName = PD("name","-none-");
  db_prepare(&q, "SELECT datetime(tkt_mtime,toLocal()) AS tkt_datetime, *"
                 "  FROM ticket WHERE tkt_uuid GLOB '%q*'",
                 zName);
  if( db_step(&q)==SQLITE_ROW ){
    n = db_column_count(&q);
    for(i=0; i<n; i++){
      const char *zVal = db_column_text(&q, i);
      const char *zName = db_column_name(&q, i);
      char *zRevealed = 0;
      if( zVal==0 ){

Old file, lines 944-968:
  tagid = db_int(0, "SELECT tagid FROM tag WHERE tagname GLOB 'tkt-%q*'",zUuid);
  if( tagid==0 ){
    @ No such ticket: %h(zUuid)
    style_footer();
    return;
  }
  db_prepare(&q,
    "SELECT datetime(mtime%s), objid, uuid, NULL, NULL, NULL"
    "  FROM event, blob"
    " WHERE objid IN (SELECT rid FROM tagxref WHERE tagid=%d)"
    "   AND blob.rid=event.objid"
    " UNION "
    "SELECT datetime(mtime%s), attachid, uuid, src, filename, user"
    "  FROM attachment, blob"
    " WHERE target=(SELECT substr(tagname,5) FROM tag WHERE tagid=%d)"
    "   AND blob.rid=attachid"
    " ORDER BY 1",
    timeline_utc(), tagid, timeline_utc(), tagid
  );
  while( db_step(&q)==SQLITE_ROW ){
    Manifest *pTicket;
    const char *zDate = db_column_text(&q, 0);
    int rid = db_column_int(&q, 1);
    const char *zChngUuid = db_column_text(&q, 2);
    const char *zFile = db_column_text(&q, 4);






New file, lines 944-968:
  tagid = db_int(0, "SELECT tagid FROM tag WHERE tagname GLOB 'tkt-%q*'",zUuid);
  if( tagid==0 ){
    @ No such ticket: %h(zUuid)
    style_footer();
    return;
  }
  db_prepare(&q,
    "SELECT datetime(mtime,toLocal()), objid, uuid, NULL, NULL, NULL"
    "  FROM event, blob"
    " WHERE objid IN (SELECT rid FROM tagxref WHERE tagid=%d)"
    "   AND blob.rid=event.objid"
    " UNION "
    "SELECT datetime(mtime,toLocal()), attachid, uuid, src, filename, user"
    "  FROM attachment, blob"
    " WHERE target=(SELECT substr(tagname,5) FROM tag WHERE tagid=%d)"
    "   AND blob.rid=attachid"
    " ORDER BY 1",
    tagid, tagid
  );
  while( db_step(&q)==SQLITE_ROW ){
    Manifest *pTicket;
    const char *zDate = db_column_text(&q, 0);
    int rid = db_column_int(&q, 1);
    const char *zChngUuid = db_column_text(&q, 2);
    const char *zFile = db_column_text(&q, 4);

Old file, lines 1252-1277:
        }
        tagid = db_int(0, "SELECT tagid FROM tag WHERE tagname GLOB 'tkt-%q*'",
                       zTktUuid);
        if( tagid==0 ){
          fossil_fatal("no such ticket %h", zTktUuid);
        }
        db_prepare(&q,
          "SELECT datetime(mtime%s), objid, NULL, NULL, NULL"
          "  FROM event, blob"
          " WHERE objid IN (SELECT rid FROM tagxref WHERE tagid=%d)"
          "   AND blob.rid=event.objid"
          " UNION "
          "SELECT datetime(mtime%s), attachid, filename, "
          "       src, user"
          "  FROM attachment, blob"
          " WHERE target=(SELECT substr(tagname,5) FROM tag WHERE tagid=%d)"
          "   AND blob.rid=attachid"
          " ORDER BY 1 DESC",
          timeline_utc(), tagid, timeline_utc(), tagid
        );
        while( db_step(&q)==SQLITE_ROW ){
          Manifest *pTicket;
          const char *zDate = db_column_text(&q, 0);
          int rid = db_column_int(&q, 1);
          const char *zFile = db_column_text(&q, 2);
          if( zFile!=0 ){






New file, lines 1252-1277:
        }
        tagid = db_int(0, "SELECT tagid FROM tag WHERE tagname GLOB 'tkt-%q*'",
                       zTktUuid);
        if( tagid==0 ){
          fossil_fatal("no such ticket %h", zTktUuid);
        }
        db_prepare(&q,
          "SELECT datetime(mtime,toLocal()), objid, NULL, NULL, NULL"
          "  FROM event, blob"
          " WHERE objid IN (SELECT rid FROM tagxref WHERE tagid=%d)"
          "   AND blob.rid=event.objid"
          " UNION "
          "SELECT datetime(mtime,toLocal()), attachid, filename, "
          "       src, user"
          "  FROM attachment, blob"
          " WHERE target=(SELECT substr(tagname,5) FROM tag WHERE tagid=%d)"
          "   AND blob.rid=attachid"
          " ORDER BY 1 DESC",
          tagid, tagid
        );
        while( db_step(&q)==SQLITE_ROW ){
          Manifest *pTicket;
          const char *zDate = db_column_text(&q, 0);
          int rid = db_column_int(&q, 1);
          const char *zFile = db_column_text(&q, 2);
          if( zFile!=0 ){
Changes to src/tktsetup.c.

Old file, lines 97-111:
@ CREATE INDEX ticketchng_idx1 ON ticketchng(tkt_id, tkt_mtime);
;

/*
** Return the ticket table definition
*/
const char *ticket_table_schema(void){
  return db_get("ticket-table", (char*)zDefaultTicketTable);
}

/*
** Common implementation for the ticket setup editor pages.
*/
static void tktsetup_generic(
  const char *zTitle,           /* Page title */






New file, lines 97-111:
@ CREATE INDEX ticketchng_idx1 ON ticketchng(tkt_id, tkt_mtime);
;

/*
** Return the ticket table definition
*/
const char *ticket_table_schema(void){
  return db_get("ticket-table", zDefaultTicketTable);
}

/*
** Common implementation for the ticket setup editor pages.
*/
static void tktsetup_generic(
  const char *zTitle,           /* Page title */

Old file, lines 126-140:
  }
  if( PB("setup") ){
    cgi_redirect("tktsetup");
  }
  isSubmit = P("submit")!=0;
  z = P("x");
  if( z==0 ){
    z = db_get(zDbField, (char*)zDfltValue);
  }
  style_header("Edit %s", zTitle);
  if( P("clear")!=0 ){
    login_verify_csrf_secret();
    db_unset(zDbField, 0);
    if( xRebuild ) xRebuild();
    cgi_redirect("tktsetup");






New file, lines 126-140:
  }
  if( PB("setup") ){
    cgi_redirect("tktsetup");
  }
  isSubmit = P("submit")!=0;
  z = P("x");
  if( z==0 ){
    z = db_get(zDbField, zDfltValue);
  }
  style_header("Edit %s", zTitle);
  if( P("clear")!=0 ){
    login_verify_csrf_secret();
    db_unset(zDbField, 0);
    if( xRebuild ) xRebuild();
    cgi_redirect("tktsetup");

Old file, lines 238-252:
@ }
;

/*
** Return the ticket common code.
*/
const char *ticket_common_code(void){
  return db_get("ticket-common", (char*)zDefaultTicketCommon);
}

/*
** WEBPAGE: tktsetup_com
** Administrative page used to define TH1 script that is
** common to all ticket screens.
*/






New file, lines 238-252:
@ }
;

/*
** Return the ticket common code.
*/
const char *ticket_common_code(void){
  return db_get("ticket-common", zDefaultTicketCommon);
}

/*
** WEBPAGE: tktsetup_com
** Administrative page used to define TH1 script that is
** common to all ticket screens.
*/

Old file, lines 270-284:
@ return
;

/*
** Return the ticket change code.
*/
const char *ticket_change_code(void){
  return db_get("ticket-change", (char*)zDefaultTicketChange);
}

/*
** WEBPAGE: tktsetup_change
** Administrative screen used to view or edit the TH1 script
** that shows ticket changes.
*/






New file, lines 270-284:
@ return
;

/*
** Return the ticket change code.
*/
const char *ticket_change_code(void){
  return db_get("ticket-change", zDefaultTicketChange);
}

/*
** WEBPAGE: tktsetup_change
** Administrative screen used to view or edit the TH1 script
** that shows ticket changes.
*/

Old file, lines 415-429:
@ </table>
;

/*
** Return the code used to generate the new ticket page
*/
const char *ticket_newpage_code(void){
  return db_get("ticket-newpage", (char*)zDefaultNew);
}

/*
** WEBPAGE: tktsetup_newpage
** Administrative page used to view or edit the TH1 script used
** to enter new tickets.
*/






New file, lines 415-429:
@ </table>
;

/*
** Return the code used to generate the new ticket page
*/
const char *ticket_newpage_code(void){
  return db_get("ticket-newpage", zDefaultNew);
}

/*
** WEBPAGE: tktsetup_newpage
** Administrative page used to view or edit the TH1 script used
** to enter new tickets.
*/

Old file, lines 556-570:
;


/*
** Return the code used to generate the view ticket page
*/
const char *ticket_viewpage_code(void){
  return db_get("ticket-viewpage", (char*)zDefaultView);
}

/*
** WEBPAGE: tktsetup_viewpage
** Administrative page used to view or edit the TH1 script that
** displays individual tickets.
*/






New file, lines 556-570:
;


/*
** Return the code used to generate the view ticket page
*/
const char *ticket_viewpage_code(void){
  return db_get("ticket-viewpage", zDefaultView);
}

/*
** WEBPAGE: tktsetup_viewpage
** Administrative page used to view or edit the TH1 script that
** displays individual tickets.
*/

Old file, lines 697-711:
@ </table>
;

/*
** Return the code used to generate the edit ticket page
*/
const char *ticket_editpage_code(void){
  return db_get("ticket-editpage", (char*)zDefaultEdit);
}

/*
** WEBPAGE: tktsetup_editpage
** Administrative page for viewing or editing the TH1 script that
** drives the ticket editing page.
*/






New file, lines 697-711:
@ </table>
;

/*
** Return the code used to generate the edit ticket page
*/
const char *ticket_editpage_code(void){
  return db_get("ticket-editpage", zDefaultEdit);
}

/*
** WEBPAGE: tktsetup_editpage
** Administrative page for viewing or editing the TH1 script that
** drives the ticket editing page.
*/

Old file, lines 753-767:
@ </th1>
;

/*
** Return the code used to generate the report list
*/
const char *ticket_reportlist_code(void){
  return db_get("ticket-reportlist", (char*)zDefaultReportList);
}

/*
** WEBPAGE: tktsetup_reportlist
** Administrative page used to view or edit the TH1 script that
** defines the "report list" page.
*/






New file, lines 753-767:
@ </th1>
;

/*
** Return the code used to generate the report list
*/
const char *ticket_reportlist_code(void){
  return db_get("ticket-reportlist", zDefaultReportList);
}

/*
** WEBPAGE: tktsetup_reportlist
** Administrative page used to view or edit the TH1 script that
** defines the "report list" page.
*/

Old file, lines 846-860:
;


/*
** Return the template ticket report format:
*/
const char *ticket_key_template(void){
  return db_get("ticket-key-template", (char*)zDefaultKey);
}

/*
** WEBPAGE: tktsetup_keytplt
**
** Administrative page used to view or edit the Key template
** for tickets.






New file, lines 846-860:
;


/*
** Return the template ticket report format:
*/
const char *ticket_key_template(void){
  return db_get("ticket-key-template", zDefaultKey);
}

/*
** WEBPAGE: tktsetup_keytplt
**
** Administrative page used to view or edit the Key template
** for tickets.
Changes to src/undo.c.

Old file, lines 73-89:
    old_exists = db_column_int(&q, 1);
    old_exe = db_column_int(&q, 2);
    if( old_exists ){
      db_ephemeral_blob(&q, 0, &new);
    }
    if( old_exists ){
      if( new_exists ){
        fossil_print("%s %s\n", redoFlag ? "REDO" : "UNDO", zPathname);
      }else{
        fossil_print("NEW %s\n", zPathname);
      }
      if( new_exists && (new_link || old_link) ){
        file_delete(zFullname);
      }
      if( old_link ){
        symlink_create(blob_str(&new), zFullname);
      }else{






New file, lines 73-89:
    old_exists = db_column_int(&q, 1);
    old_exe = db_column_int(&q, 2);
    if( old_exists ){
      db_ephemeral_blob(&q, 0, &new);
    }
    if( old_exists ){
      if( new_exists ){
        fossil_print("%s   %s\n", redoFlag ? "REDO" : "UNDO", zPathname);
      }else{
        fossil_print("NEW    %s\n", zPathname);
      }
      if( new_exists && (new_link || old_link) ){
        file_delete(zFullname);
      }
      if( old_link ){
        symlink_create(blob_str(&new), zFullname);
      }else{
Changes to src/update.c.

Old file, lines 783-822:
    zFile = db_column_text(&q, 0);
    zFull = mprintf("%/%/", g.zLocalRoot, zFile);
    errCode = historical_version_of_file(zRevision, zFile, &record,
                                         &isLink, &isExe, 0, 2);
    if( errCode==2 ){
      if( db_int(0, "SELECT rid FROM vfile WHERE pathname=%Q OR origname=%Q",
                 zFile, zFile)==0 ){
        fossil_print("UNMANAGE: %s\n", zFile);
      }else{
        undo_save(zFile);
        file_delete(zFull);
        fossil_print("DELETE: %s\n", zFile);
      }
      db_multi_exec(
        "UPDATE OR REPLACE vfile"
        "   SET pathname=origname, origname=NULL"
        " WHERE pathname=%Q AND origname!=pathname;"
        "DELETE FROM vfile WHERE pathname=%Q",
        zFile, zFile
      );
    }else{
      sqlite3_int64 mtime;
      undo_save(zFile);
      if( file_wd_size(zFull)>=0 && (isLink || file_wd_islink(0)) ){
        file_delete(zFull);
      }
      if( isLink ){
        symlink_create(blob_str(&record), zFull);
      }else{
        blob_write_to_file(&record, zFull);
      }
      file_wd_setexe(zFull, isExe);
      fossil_print("REVERTED: %s\n", zFile);
      mtime = file_wd_mtime(zFull);
      db_multi_exec(
         "UPDATE vfile"
         "   SET mtime=%lld, chnged=0, deleted=0, isexe=%d, islink=%d,mrid=rid"
         " WHERE pathname=%Q OR origname=%Q",
         mtime, isExe, isLink, zFile, zFile
      );






New file, lines 783-822:
    zFile = db_column_text(&q, 0);
    zFull = mprintf("%/%/", g.zLocalRoot, zFile);
    errCode = historical_version_of_file(zRevision, zFile, &record,
                                         &isLink, &isExe, 0, 2);
    if( errCode==2 ){
      if( db_int(0, "SELECT rid FROM vfile WHERE pathname=%Q OR origname=%Q",
                 zFile, zFile)==0 ){
        fossil_print("UNMANAGE %s\n", zFile);
      }else{
        undo_save(zFile);
        file_delete(zFull);
        fossil_print("DELETE   %s\n", zFile);
      }
      db_multi_exec(
        "UPDATE OR REPLACE vfile"
        "   SET pathname=origname, origname=NULL"
        " WHERE pathname=%Q AND origname!=pathname;"
        "DELETE FROM vfile WHERE pathname=%Q",
        zFile, zFile
      );
    }else{
      sqlite3_int64 mtime;
      undo_save(zFile);
      if( file_wd_size(zFull)>=0 && (isLink || file_wd_islink(0)) ){
        file_delete(zFull);
      }
      if( isLink ){
        symlink_create(blob_str(&record), zFull);
      }else{
        blob_write_to_file(&record, zFull);
      }
      file_wd_setexe(zFull, isExe);
      fossil_print("REVERT   %s\n", zFile);
      mtime = file_wd_mtime(zFull);
      db_multi_exec(
         "UPDATE vfile"
         "   SET mtime=%lld, chnged=0, deleted=0, isexe=%d, islink=%d,mrid=rid"
         " WHERE pathname=%Q OR origname=%Q",
         mtime, isExe, isLink, zFile, zFile
      );
Changes to src/user.c.

Old file, lines 425-442:
  int y = atoi(PD("y","3"));
  int n = atoi(PD("n","200"));
  int skip = atoi(PD("o","0"));
  Blob sql;
  Stmt q;
  int cnt = 0;
  int rc;


  login_check_credentials();
  if( !g.perm.Admin ){ login_needed(0); return; }
  create_accesslog_table();


  if( P("delall") && P("delallbtn") ){
    db_multi_exec("DELETE FROM accesslog");
    cgi_redirectf("%s/access_log?y=%d&n=%d&o=%o", g.zTop, y, n, skip);
    return;
  }
  if( P("delanon") && P("delanonbtn") ){
  int y = atoi(PD("y","3"));
  int n = atoi(PD("n","200"));
  int skip = atoi(PD("o","0"));
  Blob sql;
  Stmt q;
  int cnt = 0;
  int rc;
  int fLogEnabled;

  login_check_credentials();
  if( !g.perm.Admin ){ login_needed(0); return; }
  create_accesslog_table();


  if( P("delall") && P("delallbtn") ){
    db_multi_exec("DELETE FROM accesslog");
    cgi_redirectf("%s/access_log?y=%d&n=%d&o=%o", g.zTop, y, n, skip);
    return;
  }
  if( P("delanon") && P("delanonbtn") ){
                  " LIMIT -1 OFFSET 200)");
    cgi_redirectf("%s/access_log?y=%d&n=%d", g.zTop, y, n);
    return;
  }
  style_header("Access Log");
  blob_zero(&sql);
  blob_append_sql(&sql,
    "SELECT uname, ipaddr, datetime(mtime%s), success"
    "  FROM accesslog", timeline_utc()
  );
  if( y==1 ){
    blob_append(&sql, "  WHERE success", -1);
  }else if( y==2 ){
    blob_append(&sql, "  WHERE NOT success", -1);
  }
  blob_append_sql(&sql,"  ORDER BY rowid DESC LIMIT %d OFFSET %d", n+1, skip);
  if( skip ){
    style_submenu_element("Newer", "Newer entries",
              "%s/access_log?o=%d&n=%d&y=%d", g.zTop, skip>=n ? skip-n : 0,
              n, y);
  }
  rc = db_prepare_ignore_error(&q, "%s", blob_sql_text(&sql));




  @ <center><table border="1" cellpadding="5" id='logtable'>
  @ <thead><tr><th width="33%%">Date</th><th width="34%%">User</th>
  @ <th width="33%%">IP Address</th></tr></thead><tbody>
  while( rc==SQLITE_OK && db_step(&q)==SQLITE_ROW ){
    const char *zName = db_column_text(&q, 0);
    const char *zIP = db_column_text(&q, 1);
    const char *zDate = db_column_text(&q, 2);
    int bSuccess = db_column_int(&q, 3);
                  " LIMIT -1 OFFSET 200)");
    cgi_redirectf("%s/access_log?y=%d&n=%d", g.zTop, y, n);
    return;
  }
  style_header("Access Log");
  blob_zero(&sql);
  blob_append_sql(&sql,
    "SELECT uname, ipaddr, datetime(mtime,toLocal()), success"
    "  FROM accesslog"
  );
  if( y==1 ){
    blob_append(&sql, "  WHERE success", -1);
  }else if( y==2 ){
    blob_append(&sql, "  WHERE NOT success", -1);
  }
  blob_append_sql(&sql,"  ORDER BY rowid DESC LIMIT %d OFFSET %d", n+1, skip);
  if( skip ){
    style_submenu_element("Newer", "Newer entries",
              "%s/access_log?o=%d&n=%d&y=%d", g.zTop, skip>=n ? skip-n : 0,
              n, y);
  }
  rc = db_prepare_ignore_error(&q, "%s", blob_sql_text(&sql));
  @ <center>
  fLogEnabled = db_get_boolean("access-log", 0);
  @ <div>Access logging is %s(fLogEnabled?"on":"off").
  @ (Change this on the <a href="setup_settings">settings</a> page.)</div>
  @ <table border="1" cellpadding="5" id='logtable'>
  @ <thead><tr><th width="33%%">Date</th><th width="34%%">User</th>
  @ <th width="33%%">IP Address</th></tr></thead><tbody>
  while( rc==SQLITE_OK && db_step(&q)==SQLITE_ROW ){
    const char *zName = db_column_text(&q, 0);
    const char *zIP = db_column_text(&q, 1);
    const char *zDate = db_column_text(&q, 2);
    int bSuccess = db_column_int(&q, 3);
Changes to src/utf8.c.
#if defined(__APPLE__) && !defined(WITHOUT_ICONV)
# include <iconv.h>
#endif

/*
** Translate text from the filename character set into UTF-8.
** Return a pointer to the translated text.
** Call fossil_filename_free() to deallocate any memory used to store the
** returned pointer when done.
**
** This function must not convert '\' to '/' on windows/cygwin, as it is
** used in places where we are not sure it's really filenames we are handling,
** e.g. fossil_getenv() or handling the argv arguments from main().
**
** On Windows, translate some characters in the range
** U+F001 - U+F07F (private use area) to ASCII. Cygwin sometimes
** generates such filenames. See:
** <http://cygwin.com/cygwin-ug-net/using-specialnames.html>
*/
char *fossil_filename_to_utf8(const void *zFilename){
#if defined(_WIN32)
  int nByte = WideCharToMultiByte(CP_UTF8, 0, zFilename, -1, 0, 0, 0, 0);
  char *zUtf = sqlite3_malloc( nByte );
  char *pUtf, *qUtf;
  if( zUtf==0 ){
    return 0;
  }
  WideCharToMultiByte(CP_UTF8, 0, zFilename, -1, zUtf, nByte, 0, 0);
  pUtf = qUtf = zUtf;
  while( *pUtf ) {
    if( *pUtf == (char)0xef ){
      wchar_t c = ((pUtf[1]&0x3f)<<6)|(pUtf[2]&0x3f);
      /* Only really convert it when the resulting char is in range. */
      if( c && ((c < ' ') || wcschr(L"\"*:<>?|", c)) ){
        *qUtf++ = c; pUtf+=3; continue;
      }
    }
    *qUtf++ = *pUtf++;
  }
  *qUtf = 0;
  return zUtf;
#elif defined(__CYGWIN__)
  char *zOut;
  zOut = fossil_strdup(zFilename);
  return zOut;
#elif defined(__APPLE__) && !defined(WITHOUT_ICONV)
  char *zIn = (char*)zFilename;
  char *zOut;
  iconv_t cd;
  size_t n, x;
  for(n=0; zIn[n]>0 && zIn[n]<=0x7f; n++){}
  if( zIn[n]!=0 && (cd = iconv_open("UTF-8", "UTF-8-MAC"))!=(iconv_t)-1 ){
    char *zOutx;
    char *zOrig = zIn;
    size_t nIn, nOutx;
    nIn = n = strlen(zIn);
    nOutx = nIn+100;
    zOutx = zOut = fossil_malloc( nOutx+1 );
    x = iconv(cd, &zIn, &nIn, &zOutx, &nOutx);
    if( x==(size_t)-1 ){
      fossil_free(zOut);
      zOut = fossil_strdup(zOrig);
    }else{
      zOut[n+100-nOutx] = 0;
    }
    iconv_close(cd);
  }else{
    zOut = fossil_strdup(zFilename);
  }
  return zOut;
#else
  return (char *)zFilename;  /* No-op on non-mac unix */
#endif
}

/*
** Translate text from UTF-8 to the filename character set.
** Return a pointer to the translated text.
** Call fossil_filename_free() to deallocate any memory used to store the
** returned pointer when done.
**
** On Windows, characters in the range U+0001 to U+0031 and the
** characters '"', '*', ':', '<', '>', '?' and '|' are invalid
** to be used, except in the 'extended path' prefix ('?') and
** as drive specifier (':'). Therefore, translate those to characters
** in the range U+F001 - U+F07F (private use area), so those
** characters never arrive in any Windows API. The filenames might
** look strange in Windows explorer, but in the cygwin shell
** everything looks as expected.
**
** See: <http://cygwin.com/cygwin-ug-net/using-specialnames.html>
**
*/
void *fossil_utf8_to_filename(const char *zUtf8){
#ifdef _WIN32

  int nChar = MultiByteToWideChar(CP_UTF8, 0, zUtf8, -1, 0, 0);
  /* Overallocate 6 chars, making some room for extended paths */
  wchar_t *zUnicode = sqlite3_malloc( (nChar+6) * sizeof(wchar_t) );
  wchar_t *wUnicode = zUnicode;
  if( zUnicode==0 ){
    return 0;
  }
#if defined(__APPLE__) && !defined(WITHOUT_ICONV)
# include <iconv.h>
#endif

/*
** Translate text from the filename character set into UTF-8.
** Return a pointer to the translated text.
** Call fossil_path_free() to deallocate any memory used to store the
** returned pointer when done.
**
** This function must not convert '\' to '/' on windows/cygwin, as it is
** used in places where we are not sure it's really filenames we are handling,
** e.g. fossil_getenv() or handling the argv arguments from main().
**
** On Windows, translate some characters in the range
** U+F001 - U+F07F (private use area) to ASCII. Cygwin sometimes
** generates such filenames. See:
** <http://cygwin.com/cygwin-ug-net/using-specialnames.html>
*/
char *fossil_path_to_utf8(const void *zPath){
#if defined(_WIN32)
  int nByte = WideCharToMultiByte(CP_UTF8, 0, zPath, -1, 0, 0, 0, 0);
  char *zUtf = sqlite3_malloc( nByte );
  char *pUtf, *qUtf;
  if( zUtf==0 ){
    return 0;
  }
  WideCharToMultiByte(CP_UTF8, 0, zPath, -1, zUtf, nByte, 0, 0);
  pUtf = qUtf = zUtf;
  while( *pUtf ) {
    if( *pUtf == (char)0xef ){
      wchar_t c = ((pUtf[1]&0x3f)<<6)|(pUtf[2]&0x3f);
      /* Only really convert it when the resulting char is in range. */
      if( c && ((c < ' ') || wcschr(L"\"*:<>?|", c)) ){
        *qUtf++ = c; pUtf+=3; continue;
      }
    }
    *qUtf++ = *pUtf++;
  }
  *qUtf = 0;
  return zUtf;
#elif defined(__CYGWIN__)
  char *zOut;
  zOut = fossil_strdup(zPath);
  return zOut;
#elif defined(__APPLE__) && !defined(WITHOUT_ICONV)
  char *zIn = (char*)zPath;
  char *zOut;
  iconv_t cd;
  size_t n, x;
  for(n=0; zIn[n]>0 && zIn[n]<=0x7f; n++){}
  if( zIn[n]!=0 && (cd = iconv_open("UTF-8", "UTF-8-MAC"))!=(iconv_t)-1 ){
    char *zOutx;
    char *zOrig = zIn;
    size_t nIn, nOutx;
    nIn = n = strlen(zIn);
    nOutx = nIn+100;
    zOutx = zOut = fossil_malloc( nOutx+1 );
    x = iconv(cd, &zIn, &nIn, &zOutx, &nOutx);
    if( x==(size_t)-1 ){
      fossil_free(zOut);
      zOut = fossil_strdup(zOrig);
    }else{
      zOut[n+100-nOutx] = 0;
    }
    iconv_close(cd);
  }else{
    zOut = fossil_strdup(zPath);
  }
  return zOut;
#else
  return (char *)zPath;  /* No-op on non-mac unix */
#endif
}

/*
** Translate text from UTF-8 to the filename character set.
** Return a pointer to the translated text.
** Call fossil_path_free() to deallocate any memory used to store the
** returned pointer when done.
**
** On Windows, characters in the range U+0001 to U+0031 and the
** characters '"', '*', ':', '<', '>', '?' and '|' are invalid
** to be used, except in the 'extended path' prefix ('?') and
** as drive specifier (':'). Therefore, translate those to characters
** in the range U+F001 - U+F07F (private use area), so those
** characters never arrive in any Windows API. The filenames might
** look strange in Windows explorer, but in the cygwin shell
** everything looks as expected.
**
** See: <http://cygwin.com/cygwin-ug-net/using-specialnames.html>
**
*/
void *fossil_utf8_to_path(const char *zUtf8, int isDir){
#ifdef _WIN32
  int nReserved = isDir ? 12 : 0; /* For dir, need room for "FILENAME.EXT" */
  int nChar = MultiByteToWideChar(CP_UTF8, 0, zUtf8, -1, 0, 0);
  /* Overallocate 6 chars, making some room for extended paths */
  wchar_t *zUnicode = sqlite3_malloc( (nChar+6) * sizeof(wchar_t) );
  wchar_t *wUnicode = zUnicode;
  if( zUnicode==0 ){
    return 0;
  }
  ** path prefix and the path is larger than MAX_PATH chars,
  ** no Win32 API function can handle that unless it is
  ** prefixed with the extended path prefix. See:
  ** <http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath>
  **/
  if( fossil_isalpha(zUtf8[0]) && zUtf8[1]==':'
           && (zUtf8[2]=='\\' || zUtf8[2]=='/') ){
    if( wUnicode==zUnicode && nChar>MAX_PATH){
      memmove(wUnicode+4, wUnicode, nChar*sizeof(wchar_t));
      memcpy(wUnicode, L"\\\\?\\", 4*sizeof(wchar_t));
      wUnicode += 4;
    }
    /*
    ** If (remainder of) path starts with "<drive>:/" or "<drive>:\",
    ** leave the ':' intact but translate the backslash to a slash.
    */
    wUnicode[2] = '\\';
    wUnicode += 3;
  }else if( wUnicode==zUnicode && nChar>MAX_PATH
            && (zUtf8[0]=='\\' || zUtf8[0]=='/')
            && (zUtf8[1]=='\\' || zUtf8[1]=='/') && zUtf8[2]!='?'){
    memmove(wUnicode+6, wUnicode, nChar*sizeof(wchar_t));
    memcpy(wUnicode, L"\\\\?\\UNC", 7*sizeof(wchar_t));
    wUnicode += 7;
  }
  /*
  ** path prefix and the path is larger than MAX_PATH chars,
  ** no Win32 API function can handle that unless it is
  ** prefixed with the extended path prefix. See:
  ** <http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath>
  **/
  if( fossil_isalpha(zUtf8[0]) && zUtf8[1]==':'
           && (zUtf8[2]=='\\' || zUtf8[2]=='/') ){
    if( wUnicode==zUnicode && (nChar+nReserved)>MAX_PATH){
      memmove(wUnicode+4, wUnicode, nChar*sizeof(wchar_t));
      memcpy(wUnicode, L"\\\\?\\", 4*sizeof(wchar_t));
      wUnicode += 4;
    }
    /*
    ** If (remainder of) path starts with "<drive>:/" or "<drive>:\",
    ** leave the ':' intact but translate the backslash to a slash.
    */
    wUnicode[2] = '\\';
    wUnicode += 3;
  }else if( wUnicode==zUnicode && (nChar+nReserved)>MAX_PATH
            && (zUtf8[0]=='\\' || zUtf8[0]=='/')
            && (zUtf8[1]=='\\' || zUtf8[1]=='/') && zUtf8[2]!='?'){
    memmove(wUnicode+6, wUnicode, nChar*sizeof(wchar_t));
    memcpy(wUnicode, L"\\\\?\\UNC", 7*sizeof(wchar_t));
    wUnicode += 7;
  }
  /*
#else
  return (void *)zUtf8;  /* No-op on unix */
#endif
}

/*
** Deallocate any memory that was previously allocated by
** fossil_filename_to_utf8() or fossil_utf8_to_filename().
*/
void fossil_filename_free(void *pOld){
#if defined(_WIN32)
  sqlite3_free(pOld);
#elif (defined(__APPLE__) && !defined(WITHOUT_ICONV)) || defined(__CYGWIN__)
  fossil_free(pOld);
#else
  /* No-op on all other unix */
#endif
#else
  return (void *)zUtf8;  /* No-op on unix */
#endif
}

/*
** Deallocate any memory that was previously allocated by
** fossil_path_to_utf8() or fossil_utf8_to_path().
*/
void fossil_path_free(void *pOld){
#if defined(_WIN32)
  sqlite3_free(pOld);
#elif (defined(__APPLE__) && !defined(WITHOUT_ICONV)) || defined(__CYGWIN__)
  fossil_free(pOld);
#else
  /* No-op on all other unix */
#endif
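
The hunks above rename the conversion helpers from fossil_filename_* to fossil_path_* and give fossil_utf8_to_path() an extra isDir argument: when a directory name is converted, twelve characters are reserved for a future "FILENAME.EXT" component before deciding whether the Windows extended-path prefix is needed. A minimal sketch of the calling pattern, modeled on the vfile.c call sites below; it assumes Fossil's internal declarations are in scope, and the helper name list_native_dir is made up for illustration:

#include <dirent.h>   /* opendir()/readdir(); Fossil supplies a Windows shim */

/* Illustrative only: print the UTF-8 names of the entries in zDirUtf8.
** The directory name is converted to the native encoding with isDir=1,
** each entry name is converted back to UTF-8, and every conversion is
** paired with a fossil_path_free(). */
static void list_native_dir(const char *zDirUtf8){
  void *zNative = fossil_utf8_to_path(zDirUtf8, 1);  /* 1 == directory */
  DIR *d = opendir(zNative);
  if( d ){
    struct dirent *pEntry;
    while( (pEntry = readdir(d))!=0 ){
      char *zUtf8 = fossil_path_to_utf8(pEntry->d_name);
      fossil_print("%s\n", zUtf8);
      fossil_path_free(zUtf8);
    }
    closedir(d);
  }
  fossil_path_free(zNative);
}
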
Changes to src/vfile.c.
       "INSERT OR IGNORE INTO sfile(x) SELECT :file"
       "  WHERE NOT EXISTS(SELECT 1 FROM vfile WHERE"
       " pathname=:file %s)", filename_collation()
    );
  }
  depth++;

  zNative = fossil_utf8_to_filename(blob_str(pPath));
  d = opendir(zNative);
  if( d ){
    while( (pEntry=readdir(d))!=0 ){
      char *zPath;
      char *zUtf8;
      if( pEntry->d_name[0]=='.' ){
        if( (scanFlags & SCAN_ALL)==0 ) continue;
        if( pEntry->d_name[1]==0 ) continue;
        if( pEntry->d_name[1]=='.' && pEntry->d_name[2]==0 ) continue;
      }
      zUtf8 = fossil_filename_to_utf8(pEntry->d_name);
      blob_appendf(pPath, "/%s", zUtf8);
      zPath = blob_str(pPath);
      if( glob_match(pIgnore1, &zPath[nPrefix+1]) ||
          glob_match(pIgnore2, &zPath[nPrefix+1]) ){
        /* do nothing */
#ifdef _DIRENT_HAVE_D_TYPE
      }else if( (pEntry->d_type==DT_UNKNOWN || pEntry->d_type==DT_LNK)
       "INSERT OR IGNORE INTO sfile(x) SELECT :file"
       "  WHERE NOT EXISTS(SELECT 1 FROM vfile WHERE"
       " pathname=:file %s)", filename_collation()
    );
  }
  depth++;

  zNative = fossil_utf8_to_path(blob_str(pPath), 1);
  d = opendir(zNative);
  if( d ){
    while( (pEntry=readdir(d))!=0 ){
      char *zPath;
      char *zUtf8;
      if( pEntry->d_name[0]=='.' ){
        if( (scanFlags & SCAN_ALL)==0 ) continue;
        if( pEntry->d_name[1]==0 ) continue;
        if( pEntry->d_name[1]=='.' && pEntry->d_name[2]==0 ) continue;
      }
      zUtf8 = fossil_path_to_utf8(pEntry->d_name);
      blob_appendf(pPath, "/%s", zUtf8);
      zPath = blob_str(pPath);
      if( glob_match(pIgnore1, &zPath[nPrefix+1]) ||
          glob_match(pIgnore2, &zPath[nPrefix+1]) ){
        /* do nothing */
#ifdef _DIRENT_HAVE_D_TYPE
      }else if( (pEntry->d_type==DT_UNKNOWN || pEntry->d_type==DT_LNK)
#endif
        if( (scanFlags & SCAN_TEMP)==0 || is_temporary_file(zUtf8) ){
          db_bind_text(&ins, ":file", &zPath[nPrefix+1]);
          db_step(&ins);
          db_reset(&ins);
        }
      }
      fossil_filename_free(zUtf8);
      blob_resize(pPath, origSize);
    }
    closedir(d);
  }
  fossil_filename_free(zNative);

  depth--;
  if( depth==0 ){
    db_finalize(&ins);
  }
}

#endif
        if( (scanFlags & SCAN_TEMP)==0 || is_temporary_file(zUtf8) ){
          db_bind_text(&ins, ":file", &zPath[nPrefix+1]);
          db_step(&ins);
          db_reset(&ins);
        }
      }
      fossil_path_free(zUtf8);
      blob_resize(pPath, origSize);
    }
    closedir(d);
  }
  fossil_path_free(zNative);

  depth--;
  if( depth==0 ){
    db_finalize(&ins);
  }
}

       "UPDATE OR IGNORE dscan_temp SET y = coalesce(y, 0) + 1"
       "  WHERE x=:file %s",
       filename_collation()
    );
  }
  depth++;

  zNative = fossil_utf8_to_filename(blob_str(pPath));
  d = opendir(zNative);
  if( d ){
    while( (pEntry=readdir(d))!=0 ){
      char *zOrigPath;
      char *zPath;
      char *zUtf8;
      if( pEntry->d_name[0]=='.' ){
        if( (scanFlags & SCAN_ALL)==0 ) continue;
        if( pEntry->d_name[1]==0 ) continue;
        if( pEntry->d_name[1]=='.' && pEntry->d_name[2]==0 ) continue;
      }
      zOrigPath = mprintf("%s", blob_str(pPath));
      zUtf8 = fossil_filename_to_utf8(pEntry->d_name);
      blob_appendf(pPath, "/%s", zUtf8);
      zPath = blob_str(pPath);
      if( glob_match(pIgnore1, &zPath[nPrefix+1]) ||
          glob_match(pIgnore2, &zPath[nPrefix+1]) ){
        /* do nothing */
#ifdef _DIRENT_HAVE_D_TYPE
      }else if( (pEntry->d_type==DT_UNKNOWN || pEntry->d_type==DT_LNK)
       "UPDATE OR IGNORE dscan_temp SET y = coalesce(y, 0) + 1"
       "  WHERE x=:file %s",
       filename_collation()
    );
  }
  depth++;

  zNative = fossil_utf8_to_path(blob_str(pPath), 1);
  d = opendir(zNative);
  if( d ){
    while( (pEntry=readdir(d))!=0 ){
      char *zOrigPath;
      char *zPath;
      char *zUtf8;
      if( pEntry->d_name[0]=='.' ){
        if( (scanFlags & SCAN_ALL)==0 ) continue;
        if( pEntry->d_name[1]==0 ) continue;
        if( pEntry->d_name[1]=='.' && pEntry->d_name[2]==0 ) continue;
      }
      zOrigPath = mprintf("%s", blob_str(pPath));
      zUtf8 = fossil_path_to_utf8(pEntry->d_name);
      blob_appendf(pPath, "/%s", zUtf8);
      zPath = blob_str(pPath);
      if( glob_match(pIgnore1, &zPath[nPrefix+1]) ||
          glob_match(pIgnore2, &zPath[nPrefix+1]) ){
        /* do nothing */
#ifdef _DIRENT_HAVE_D_TYPE
      }else if( (pEntry->d_type==DT_UNKNOWN || pEntry->d_type==DT_LNK)
      }else if( file_wd_isfile_or_link(zPath) ){
#endif
        db_bind_text(&upd, ":file", zOrigPath);
        db_step(&upd);
        db_reset(&upd);
        result++; /* found 1 normal file */
      }
      fossil_filename_free(zUtf8);
      blob_resize(pPath, origSize);
      fossil_free(zOrigPath);
    }
    closedir(d);
  }
  fossil_filename_free(zNative);

  depth--;
  if( depth==0 ){
    db_finalize(&upd);
    db_finalize(&ins);
  }
  return result;
      }else if( file_wd_isfile_or_link(zPath) ){
#endif
        db_bind_text(&upd, ":file", zOrigPath);
        db_step(&upd);
        db_reset(&upd);
        result++; /* found 1 normal file */
      }
      fossil_path_free(zUtf8);
      blob_resize(pPath, origSize);
      fossil_free(zOrigPath);
    }
    closedir(d);
  }
  fossil_path_free(zNative);

  depth--;
  if( depth==0 ){
    db_finalize(&upd);
    db_finalize(&ins);
  }
  return result;
Changes to src/wiki.c.
**   text/x-markdown         Markdown
**   anything else...        Plain text
*/
void wiki_render_by_mimetype(Blob *pWiki, const char *zMimetype){
  if( zMimetype==0 || fossil_strcmp(zMimetype, "text/x-fossil-wiki")==0 ){
    wiki_convert(pWiki, 0, 0);
  }else if( fossil_strcmp(zMimetype, "text/x-markdown")==0 ){
    Blob title = BLOB_INITIALIZER;
    Blob tail = BLOB_INITIALIZER;
    markdown_to_html(pWiki, &title, &tail);
#if 0
    if( blob_size(&title)>0 ){
      @ <h1>%s(blob_str(&title))</h1>
    }
#endif
    @ %s(blob_str(&tail))
    blob_reset(&title);
    blob_reset(&tail);
  }else{
    @ <pre>
    @ %h(blob_str(pWiki))
    @ </pre>
  }
}
**   text/x-markdown         Markdown
**   anything else...        Plain text
*/
void wiki_render_by_mimetype(Blob *pWiki, const char *zMimetype){
  if( zMimetype==0 || fossil_strcmp(zMimetype, "text/x-fossil-wiki")==0 ){
    wiki_convert(pWiki, 0, 0);
  }else if( fossil_strcmp(zMimetype, "text/x-markdown")==0 ){

    Blob tail = BLOB_INITIALIZER;
    markdown_to_html(pWiki, 0, &tail);





    @ %s(blob_str(&tail))

    blob_reset(&tail);
  }else{
    @ <pre>
    @ %h(blob_str(pWiki))
    @ </pre>
  }
}
  db_end_transaction(0);
  return 1;
}

/*
** COMMAND: wiki*
**
** Usage: %fossil wiki (export|create|commit|list) WikiName
**
** Run various subcommands to work with wiki entries.
**
**     %fossil wiki export PAGENAME ?FILE?
**
**        Sends the latest version of the PAGENAME wiki
**        entry to the given file or standard output.

**
**     %fossil wiki commit PAGENAME ?FILE? [-mimetype TEXT-FORMAT]
**

**        Commit changes to a wiki page from FILE or from standard

**        input. The -mimetype (-M) flag specifies the mime type,

**        defaulting to the type used by the previous version of

**        the page or (for new pages) text/x-fossil-wiki.
**

**     %fossil wiki create PAGENAME ?FILE? [-mimetype TEXT-FORMAT]
**
**        Create a new wiki page with initial content taken from

**        FILE or from standard input.
**
**     %fossil wiki list
**     %fossil wiki ls
**
**        Lists all wiki entries, one per line, ordered
**        case-insensitively by name.



**
*/
void wiki_cmd(void){
  int n;
  db_find_and_open_repository(0, 0);
  if( g.argc<3 ){
    goto wiki_cmd_usage;
  }
  n = strlen(g.argv[2]);
  if( n==0 ){
    goto wiki_cmd_usage;
  }

  if( strncmp(g.argv[2],"export",n)==0 ){
    const char *zPageName;        /* Name of the wiki page to export */
    const char *zFile;            /* Name of the output file (0=stdout) */

    int rid;                      /* Artifact ID of the wiki page */
    int i;                        /* Loop counter */
    char *zBody = 0;              /* Wiki page content */
    Blob body;                    /* Wiki page content */
    Manifest *pWiki = 0;          /* Parsed wiki page content */



    if( (g.argc!=4) && (g.argc!=5) ){
      usage("export PAGENAME ?FILE?");
    }
    zPageName = g.argv[3];
    rid = db_int(0, "SELECT x.rid FROM tag t, tagxref x"
      " WHERE x.tagid=t.tagid AND t.tagname='wiki-%q'"
      " ORDER BY x.mtime DESC LIMIT 1",
      zPageName
    );
    if( (pWiki = manifest_get(rid, CFTYPE_WIKI, 0))!=0 ){
      zBody = pWiki->zWiki;
    }
    if( zBody==0 ){
      fossil_fatal("wiki page [%s] not found",zPageName);
    }


















    for(i=strlen(zBody); i>0 && fossil_isspace(zBody[i-1]); i--){}
    zBody[i] = 0;
    zFile  = (g.argc==4) ? "-" : g.argv[4];
    blob_init(&body, zBody, -1);
    blob_append(&body, "\n", 1);
    blob_write_to_file(&body, zFile);
    blob_reset(&body);
    manifest_destroy(pWiki);
    return;
  }else if( strncmp(g.argv[2],"commit",n)==0
            || strncmp(g.argv[2],"create",n)==0 ){
    const char *zPageName;        /* page name */
    Blob content;                 /* Input content */
    int rid;
    Manifest *pWiki = 0;          /* Parsed wiki page content */
    const char *zMimeType = find_option("mimetype", "M", 1);



    if( g.argc!=4 && g.argc!=5 ){
      usage("commit|create PAGENAME ?FILE? [-mimetype TEXT-FORMAT]");


    }
    zPageName = g.argv[3];
    if( g.argc==4 ){
      blob_read_from_channel(&content, stdin, -1);
    }else{
      blob_read_from_file(&content, g.argv[4]);
    }
    if(!zMimeType || !*zMimeType){
      /* Try to deduce the mime type based on the prior version. */

      rid = db_int(0, "SELECT x.rid FROM tag t, tagxref x"
                   " WHERE x.tagid=t.tagid AND t.tagname='wiki-%q'"
                   " ORDER BY x.mtime DESC LIMIT 1",
                   zPageName
                   );
      if(rid>0 && (pWiki = manifest_get(rid, CFTYPE_WIKI, 0))!=0
         && (pWiki->zMimetype && *pWiki->zMimetype)){
        zMimeType = pWiki->zMimetype;
      }









    }



    if( g.argv[2][1]=='r' ){
      wiki_cmd_commit(zPageName, 1, &content, zMimeType, 1);
      fossil_print("Created new wiki page %s.\n", zPageName);
    }else{
      wiki_cmd_commit(zPageName, 0, &content, zMimeType, 1);
      fossil_print("Updated wiki page %s.\n", zPageName);















    }
    manifest_destroy(pWiki);
    blob_reset(&content);
  }else if( strncmp(g.argv[2],"delete",n)==0 ){
    if( g.argc!=5 ){
      usage("delete PAGENAME");
    }
    fossil_fatal("delete not yet implemented.");
  }else if(( strncmp(g.argv[2],"list",n)==0 )
          || ( strncmp(g.argv[2],"ls",n)==0 )){
    Stmt q;

    db_prepare(&q,
      "SELECT substr(tagname, 6) FROM tag WHERE tagname GLOB 'wiki-*'"
      " ORDER BY lower(tagname) /*sort*/"
    );






    while( db_step(&q)==SQLITE_ROW ){
      const char *zName = db_column_text(&q, 0);
      fossil_print( "%s\n",zName);
    }
    db_finalize(&q);
  }else{
    goto wiki_cmd_usage;
  }
  return;

wiki_cmd_usage:
  usage("export|create|commit|list ...");
}
  db_end_transaction(0);
  return 1;
}

/*
** COMMAND: wiki*
**
** Usage: ../fossil wiki (export|create|commit|list) WikiName
**
** Run various subcommands to work with wiki entries or tech notes.
**
**    ../fossil wiki export ?PAGENAME? ?FILE? [-t|--technote DATETIME ]
**
**       Sends the latest version of either the PAGENAME wiki entry
**       or the DATETIME tech note to the given file or standard 
**       output. One of PAGENAME or DATETIME must be specified.
**
**    ../fossil wiki (create|commit) PAGENAME ?FILE? ?OPTIONS?
**              
**       Create a new or commit changes to an existing wiki page or 
**       technote from FILE or from standard input.
**
**       Options:
**         -M|--mimetype TEXT-FORMAT   The mime type of the update,
**                                     defaulting to the type used by the
**                                     previous version of the page or (for
**                                     new pages) text/x-fossil-wiki.

**         -t|--technote DATETIME      Specifies the timestamp of the technote
**                                     to be created or updated.

**         --technote-tags TAGS        The set of tags for a technote.
**         --technote-bgcolor COLOR    The color used for the technote on the
**                                     timeline.
**
**    ../fossil wiki list ?--technote?
**    ../fossil wiki ls ?--technote?
**
**       Lists all wiki entries, one per line, ordered
**       case-insensitively by name. The --technote flag
**       specifies that technotes will be listed instead of
**       the wiki entries, in order of timestamp.
**
*/
void wiki_cmd(void){
  int n;
  db_find_and_open_repository(0, 0);
  if( g.argc<3 ){
    goto wiki_cmd_usage;
  }
  n = strlen(g.argv[2]);
  if( n==0 ){
    goto wiki_cmd_usage;
  }

  if( strncmp(g.argv[2],"export",n)==0 ){
    const char *zPageName;        /* Name of the wiki page to export */
    const char *zFile;            /* Name of the output file (0=stdout) */
    const char *zETime;           /* The name of the technote to export */
    int rid;                      /* Artifact ID of the wiki page */
    int i;                        /* Loop counter */
    char *zBody = 0;              /* Wiki page content */
    Blob body;                    /* Wiki page content */
    Manifest *pWiki = 0;          /* Parsed wiki page content */

    zETime = find_option("technote","t",1);
    if( !zETime ){
      if( (g.argc!=4) && (g.argc!=5) ){
        usage("export PAGENAME ?FILE?");
      }
      zPageName = g.argv[3];
      rid = db_int(0, "SELECT x.rid FROM tag t, tagxref x"
        " WHERE x.tagid=t.tagid AND t.tagname='wiki-%q'"
        " ORDER BY x.mtime DESC LIMIT 1",
        zPageName
      );
      if( (pWiki = manifest_get(rid, CFTYPE_WIKI, 0))!=0 ){
        zBody = pWiki->zWiki;
      }
      if( zBody==0 ){
        fossil_fatal("wiki page [%s] not found",zPageName);
      }
      zFile = (g.argc==4) ? "-" : g.argv[4];
    }else{
      if( (g.argc!=3) && (g.argc!=4) ){
        usage("export ?FILE? --technote DATETIME");
      }
      rid = db_int(0, "SELECT objid FROM event"
        " WHERE datetime(mtime)=datetime('%q') AND type='e'"
        " ORDER BY mtime DESC LIMIT 1",
        zETime
      );
      if( (pWiki = manifest_get(rid, CFTYPE_EVENT, 0))!=0 ){
        zBody = pWiki->zWiki;
      }
      if( zBody==0 ){
        fossil_fatal("technote not found");
      }
      zFile = (g.argc==3) ? "-" : g.argv[3];
    }
    for(i=strlen(zBody); i>0 && fossil_isspace(zBody[i-1]); i--){}
    zBody[i] = 0;

    blob_init(&body, zBody, -1);
    blob_append(&body, "\n", 1);
    blob_write_to_file(&body, zFile);
    blob_reset(&body);
    manifest_destroy(pWiki);
    return;
  }else if( strncmp(g.argv[2],"commit",n)==0
            || strncmp(g.argv[2],"create",n)==0 ){
    const char *zPageName;        /* page name */
    Blob content;                 /* Input content */
    int rid;
    Manifest *pWiki = 0;          /* Parsed wiki page content */
    const char *zMimeType = find_option("mimetype", "M", 1);
    const char *zETime = find_option("technote", "t", 1);
    const char *zTags = find_option("technote-tags", NULL, 1);
    const char *zClr = find_option("technote-bgcolor", NULL, 1);
    if( g.argc!=4 && g.argc!=5 ){
      usage("commit|create PAGENAME ?FILE? [--mimetype TEXT-FORMAT]"
            " [--technote DATETIME] [--technote-tags TAGS]"
            " [--technote-bgcolor COLOR]");
    }
    zPageName = g.argv[3];
    if( g.argc==4 ){
      blob_read_from_channel(&content, stdin, -1);
    }else{
      blob_read_from_file(&content, g.argv[4]);
    }
    if(!zMimeType || !*zMimeType){
      /* Try to deduce the mime type based on the prior version. */
      if ( !zETime ){ 
        rid = db_int(0, "SELECT x.rid FROM tag t, tagxref x"
                     " WHERE x.tagid=t.tagid AND t.tagname='wiki-%q'"
                     " ORDER BY x.mtime DESC LIMIT 1",
                     zPageName
                     );
        if(rid>0 && (pWiki = manifest_get(rid, CFTYPE_WIKI, 0))!=0
           && (pWiki->zMimetype && *pWiki->zMimetype)){
          zMimeType = pWiki->zMimetype;
        }
      }else{
        rid = db_int(0, "SELECT objid FROM event"
                     " WHERE datetime(mtime)=datetime('%q') AND type='e'"
                     " ORDER BY mtime DESC LIMIT 1",
                     zPageName
                     );
        if(rid>0 && (pWiki = manifest_get(rid, CFTYPE_EVENT, 0))!=0
           && (pWiki->zMimetype && *pWiki->zMimetype)){
          zMimeType = pWiki->zMimetype;
        }
      }
    }
    if( !zETime ){
      if( g.argv[2][1]=='r' ){
        wiki_cmd_commit(zPageName, 1, &content, zMimeType, 1);
        fossil_print("Created new wiki page %s.\n", zPageName);
      }else{
        wiki_cmd_commit(zPageName, 0, &content, zMimeType, 1);
        fossil_print("Updated wiki page %s.\n", zPageName);
      }
    }else{
      char *zMETime;          /* Normalized, mutable version of zETime */
      zMETime = db_text(0, "SELECT coalesce(datetime(%Q),datetime('now'))",
                        zETime);
      if( g.argv[2][1]=='r' ){
        event_cmd_commit(zMETime, 1, &content, zMimeType, zPageName,
                         zTags, zClr);
        fossil_print("Created new tech note %s.\n", zMETime);
      }else{
        event_cmd_commit(zMETime, 0, &content, zMimeType, zPageName,
                         zTags, zClr);
        fossil_print("Updated tech note %s.\n", zMETime);
      }
      free(zMETime);
    }
    manifest_destroy(pWiki);
    blob_reset(&content);
  }else if( strncmp(g.argv[2],"delete",n)==0 ){
    if( g.argc!=5 ){
      usage("delete PAGENAME");
    }
    fossil_fatal("delete not yet implemented.");
  }else if(( strncmp(g.argv[2],"list",n)==0 )
          || ( strncmp(g.argv[2],"ls",n)==0 )){
    Stmt q;
    if ( !find_option("technote","t",0) ){
      db_prepare(&q,
        "SELECT substr(tagname, 6) FROM tag WHERE tagname GLOB 'wiki-*'"
        " ORDER BY lower(tagname) /*sort*/"
      );
    }else{
      db_prepare(&q,
        "SELECT datetime(mtime) FROM event WHERE type='e'"
        " ORDER BY mtime /*sort*/"
      );
    }
    while( db_step(&q)==SQLITE_ROW ){
      const char *zName = db_column_text(&q, 0);
      fossil_print( "%s\n",zName);
    }
    db_finalize(&q);
  }else{
    goto wiki_cmd_usage;
  }
  return;

wiki_cmd_usage:
  usage("export|create|commit|list ...");
}
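
The reworked command body above serves both ordinary wiki pages and tech notes. A minimal sketch of just that dispatch, using the helper signatures visible in the hunk; it assumes Fossil's internal declarations (Blob, db_text, wiki_cmd_commit, event_cmd_commit), and the wrapper name commit_wiki_or_technote is made up for illustration:

/* Illustrative only: commit pContent either as the wiki page zPageName
** or, when zETime is given, as the tech note at that timestamp. */
static void commit_wiki_or_technote(
  const char *zPageName,  /* Page name, or the tech note title */
  const char *zETime,     /* --technote DATETIME, or 0 for a wiki page */
  Blob *pContent,         /* New page body */
  const char *zMimeType,  /* Mime type, possibly deduced from the prior version */
  const char *zTags,      /* --technote-tags, may be 0 */
  const char *zClr,       /* --technote-bgcolor, may be 0 */
  int isNew               /* 1 for "create", 0 for "commit" */
){
  if( zETime==0 ){
    /* Wiki pages are keyed by name */
    wiki_cmd_commit(zPageName, isNew, pContent, zMimeType, 1);
  }else{
    /* Tech notes are keyed by a normalized timestamp */
    char *zMETime = db_text(0,
       "SELECT coalesce(datetime(%Q),datetime('now'))", zETime);
    event_cmd_commit(zMETime, isNew, pContent, zMimeType, zPageName,
                     zTags, zClr);
    free(zMETime);
  }
}
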
Changes to src/wikiformat.c.
/*
** z points to a "<" character.  Check to see if this is the start of
** a valid markup.  If it is, return the total number of characters in
** the markup including the initial "<" and the terminating ">".  If
** it is not well-formed markup, return 0.
*/
static int markupLength(const char *z){
  int n = 1;
  int inparen = 0;
  int c;
  if( z[n]=='/' ){ n++; }
  if( !fossil_isalpha(z[n]) ) return 0;
  while( fossil_isalnum(z[n]) || z[n]=='-' ){ n++; }
  c = z[n];

/*
** z points to a "<" character.  Check to see if this is the start of
** a valid markup.  If it is, return the total number of characters in
** the markup including the initial "<" and the terminating ">".  If
** it is not well-formed markup, return 0.
*/
int htmlTagLength(const char *z){
  int n = 1;
  int inparen = 0;
  int c;
  if( z[n]=='/' ){ n++; }
  if( !fossil_isalpha(z[n]) ) return 0;
  while( fossil_isalnum(z[n]) || z[n]=='-' ){ n++; }
  c = z[n];
**
** z points to the start of a token.  Return the number of
** characters in that token.  Write the token type into *pTokenType.
*/
static int nextWikiToken(const char *z, Renderer *p, int *pTokenType){
  int n;
  if( z[0]=='<' ){
    n = markupLength(z);
    if( n>0 ){
      *pTokenType = TOKEN_MARKUP;
      return n;
    }else{
      *pTokenType = TOKEN_CHARACTER;
      return 1;
    }
**
** z points to the start of a token.  Return the number of
** characters in that token.  Write the token type into *pTokenType.
*/
static int nextWikiToken(const char *z, Renderer *p, int *pTokenType){
  int n;
  if( z[0]=='<' ){
    n = htmlTagLength(z);
    if( n>0 ){
      *pTokenType = TOKEN_MARKUP;
      return n;
    }else{
      *pTokenType = TOKEN_CHARACTER;
      return 1;
    }
** z points to the start of a token.  Return the number of
** characters in that token.
*/
static int nextHtmlToken(const char *z){
  int n;
  char c;
  if( (c=z[0])=='<' ){
    n = markupLength(z);
    if( n<=0 ) n = 1;
  }else if( fossil_isspace(c) ){
    for(n=1; z[n] && fossil_isspace(z[n]); n++){}
  }else if( c=='&' ){
    n = z[1]=='#' ? 2 : 1;
    while( fossil_isalnum(z[n]) ) n++;
    if( z[n]==';' ) n++;
** z points to the start of a token.  Return the number of
** characters in that token.
*/
static int nextHtmlToken(const char *z){
  int n;
  char c;
  if( (c=z[0])=='<' ){
    n = htmlTagLength(z);
    if( n<=0 ) n = 1;
  }else if( fossil_isspace(c) ){
    for(n=1; z[n] && fossil_isspace(z[n]); n++){}
  }else if( c=='&' ){
    n = z[1]=='#' ? 2 : 1;
    while( fossil_isalnum(z[n]) ) n++;
    if( z[n]==';' ) n++;
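
Renaming markupLength() to htmlTagLength() and dropping the static qualifier exposes the scanner to other source files; its contract is unchanged. A small illustrative check of that contract; the expected values follow the comment above the function and the opening tests visible in the hunk, not a re-derivation of the unshown body:

#include <assert.h>

/* Illustrative only: htmlTagLength() is assumed to be declared through
** Fossil's generated headers. */
static void check_htmlTagLength(void){
  assert( htmlTagLength("<b>bold</b>")==3 );  /* "<b>"  spans 3 characters */
  assert( htmlTagLength("</b> tail")==4 );    /* "</b>" spans 4 characters */
  assert( htmlTagLength("< b>x")==0 );        /* space after '<': not markup */
  assert( htmlTagLength("<3")==0 );           /* digit after '<': not markup */
}
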
Changes to src/winfile.c.
                   &privSetSize, &grantedAccess, &accessYesNo) ){
    /*
     * Unable to perform access check.
     */

    rc = -1; goto done;
  }
  if( !accessYesNo ) rc = -1;



done:

  if( hToken != NULL ){
    CloseHandle(hToken);
  }
  if( impersonated ){
                   &privSetSize, &grantedAccess, &accessYesNo) ){
    /*
     * Unable to perform access check.
     */

    rc = -1; goto done;
  }
  if( !accessYesNo ){
    rc = -1;
  }

done:

  if( hToken != NULL ){
    CloseHandle(hToken);
  }
  if( impersonated ){
void win32_getcwd(char *zBuf, int nBuf){
  int i;
  char *zUtf8;
  wchar_t *zWide = fossil_malloc( sizeof(wchar_t)*nBuf );
  if( GetCurrentDirectoryW(nBuf, zWide)==0 ){
    fossil_fatal("cannot find current working directory.");
  }
  zUtf8 = fossil_filename_to_utf8(zWide);
  fossil_free(zWide);
  for(i=0; zUtf8[i]; i++) if( zUtf8[i]=='\\' ) zUtf8[i] = '/';
  strncpy(zBuf, zUtf8, nBuf);
  fossil_filename_free(zUtf8);
}
#endif /* _WIN32  -- This code is for win32 only */
void win32_getcwd(char *zBuf, int nBuf){
  int i;
  char *zUtf8;
  wchar_t *zWide = fossil_malloc( sizeof(wchar_t)*nBuf );
  if( GetCurrentDirectoryW(nBuf, zWide)==0 ){
    fossil_fatal("cannot find current working directory.");
  }
  zUtf8 = fossil_path_to_utf8(zWide);
  fossil_free(zWide);
  for(i=0; zUtf8[i]; i++) if( zUtf8[i]=='\\' ) zUtf8[i] = '/';
  strncpy(zBuf, zUtf8, nBuf);
  fossil_path_free(zUtf8);
}
#endif /* _WIN32  -- This code is for win32 only */
Changes to src/xfer.c.
    blob_reset(&content);
    content = next;
  }else{
    pXfer->nFileRcvd++;
  }
  sha1sum_blob(&content, &hash);
  if( !blob_eq_str(&pXfer->aToken[1], blob_str(&hash), -1) ){
    blob_appendf(&pXfer->err, "content does not match sha1 hash");


  }
  rid = content_put_ex(&content, blob_str(&hash), 0, 0, isPriv);
  Th_AppendToList(pzUuidList, pnUuidList, blob_str(&hash), blob_size(&hash));
  blob_reset(&hash);
  if( rid==0 ){
    blob_appendf(&pXfer->err, "%s", g.zErrMsg);
    blob_reset(&content);
    blob_reset(&content);
    content = next;
  }else{
    pXfer->nFileRcvd++;
  }
  sha1sum_blob(&content, &hash);
  if( !blob_eq_str(&pXfer->aToken[1], blob_str(&hash), -1) ){
    blob_appendf(&pXfer->err,
        "wrong hash on received artifact: expected %s but got %s",
        blob_str(&pXfer->aToken[1]), blob_str(&hash));
  }
  rid = content_put_ex(&content, blob_str(&hash), 0, 0, isPriv);
  Th_AppendToList(pzUuidList, pnUuidList, blob_str(&hash), blob_size(&hash));
  blob_reset(&hash);
  if( rid==0 ){
    blob_appendf(&pXfer->err, "%s", g.zErrMsg);
    blob_reset(&content);
      blob_appendf(&cluster, "M %s\n", db_column_text(&q, 0));
      nRow++;
      if( nRow>=800 && nUncl>nRow+100 ){
        md5sum_blob(&cluster, &cksum);
        blob_appendf(&cluster, "Z %b\n", &cksum);
        blob_reset(&cksum);
        rid = content_put(&cluster);

        blob_reset(&cluster);
        nUncl -= nRow;
        nRow = 0;
        blob_append_sql(&deleteWhere, ",%d", rid);
      }
    }
    db_finalize(&q);
    db_multi_exec(
      "DELETE FROM unclustered WHERE rid NOT IN (0 %s)"
      "   AND NOT EXISTS(SELECT 1 FROM phantom WHERE rid=unclustered.rid)",
      blob_sql_text(&deleteWhere)
    );
    blob_reset(&deleteWhere);
    if( nRow>0 ){
      md5sum_blob(&cluster, &cksum);
      blob_appendf(&cluster, "Z %b\n", &cksum);
      blob_reset(&cksum);
      content_put(&cluster);

      blob_reset(&cluster);
    }
  }
}

/*
** Send igot messages for every private artifact

      blob_appendf(&cluster, "M %s\n", db_column_text(&q, 0));
      nRow++;
      if( nRow>=800 && nUncl>nRow+100 ){
        md5sum_blob(&cluster, &cksum);
        blob_appendf(&cluster, "Z %b\n", &cksum);
        blob_reset(&cksum);
        rid = content_put(&cluster);
        manifest_crosslink(rid, &cluster, MC_NONE);
        blob_reset(&cluster);
        nUncl -= nRow;
        nRow = 0;
        blob_append_sql(&deleteWhere, ",%d", rid);
      }
    }
    db_finalize(&q);
    db_multi_exec(
      "DELETE FROM unclustered WHERE rid NOT IN (0 %s)"
      "   AND NOT EXISTS(SELECT 1 FROM phantom WHERE rid=unclustered.rid)",
      blob_sql_text(&deleteWhere)
    );
    blob_reset(&deleteWhere);
    if( nRow>0 ){
      md5sum_blob(&cluster, &cksum);
      blob_appendf(&cluster, "Z %b\n", &cksum);
      blob_reset(&cksum);
      rid = content_put(&cluster);
      manifest_crosslink(rid, &cluster, MC_NONE);
      blob_reset(&cluster);
    }
  }
}

/*
** Send igot messages for every private artifact
Changes to src/xfersetup.c.
  }
  if( P("setup") ){
    cgi_redirect("xfersetup");
  }
  isSubmit = P("submit")!=0;
  z = P("x");
  if( z==0 ){
    z = db_get(zDbField, (char*)zDfltValue);
  }
  style_header("Edit %s", zTitle);
  if( P("clear")!=0 ){
    login_verify_csrf_secret();
    db_unset(zDbField, 0);
    if( xRebuild ) xRebuild();
    z = zDfltValue;
  }
  if( P("setup") ){
    cgi_redirect("xfersetup");
  }
  isSubmit = P("submit")!=0;
  z = P("x");
  if( z==0 ){
    z = db_get(zDbField, zDfltValue);
  }
  style_header("Edit %s", zTitle);
  if( P("clear")!=0 ){
    login_verify_csrf_secret();
    db_unset(zDbField, 0);
    if( xRebuild ) xRebuild();
    z = zDfltValue;
Changes to src/zip.c.
  char zExTime[13];
  char zBuf[100];
  char zOutBuf[100000];

  /* Fill in as much of the header as we know.
  */
  nBlob = pFile ? blob_size(pFile) : 0;
  if( nBlob>0 ){
    iMethod = 8;
    switch( mPerm ){
      case PERM_LNK:   iMode = 0120755;   break;
      case PERM_EXE:   iMode = 0100755;   break;
      default:         iMode = 0100644;   break;
    }
  }else{
    iMethod = 0;
    iMode = 040755;
  }
  nameLen = strlen(zName);
  memset(zHdr, 0, sizeof(zHdr));
  put32(&zHdr[0], 0x04034b50);
  put16(&zHdr[4], 0x000a);
  char zExTime[13];
  char zBuf[100];
  char zOutBuf[100000];

  /* Fill in as much of the header as we know.
  */
  nBlob = pFile ? blob_size(pFile) : 0;
  if( pFile ){ /* This is a file, possibly empty... */
    iMethod = (nBlob>0) ? 8 : 0; /* Cannot compress zero bytes. */
    switch( mPerm ){
      case PERM_LNK:   iMode = 0120755;   break;
      case PERM_EXE:   iMode = 0100755;   break;
      default:         iMode = 0100644;   break;
    }
  }else{       /* This is a directory, no blob... */
    iMethod = 0;
    iMode = 040755;
  }
  nameLen = strlen(zName);
  memset(zHdr, 0, sizeof(zHdr));
  put32(&zHdr[0], 0x04034b50);
  put16(&zHdr[4], 0x000a);
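
The change above stops treating an empty file like a directory: a file with no content keeps a regular-file mode and merely skips deflate, while only a missing blob now selects the directory mode. A self-contained restatement of that decision; the function name and out-parameters are illustrative, and PERM_LNK/PERM_EXE are Fossil's permission constants from the hunk:

/* Illustrative only: method 8 = deflate, 0 = store; the mode values are the
** Unix permission bits Fossil writes into the ZIP external attributes. */
static void zip_entry_attrs(int isFile, int nBlob, int mPerm,
                            int *piMethod, int *piMode){
  if( isFile ){                      /* a file, possibly zero bytes long */
    *piMethod = (nBlob>0) ? 8 : 0;   /* nothing to deflate when empty */
    switch( mPerm ){
      case PERM_LNK: *piMode = 0120755; break;  /* symbolic link */
      case PERM_EXE: *piMode = 0100755; break;  /* executable file */
      default:       *piMode = 0100644; break;  /* ordinary file */
    }
  }else{                             /* a directory entry */
    *piMethod = 0;
    *piMode = 040755;
  }
}
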
  rid = name_to_typed_rid(nRid?zRid:zName,"ci");
  if( rid==0 ){
    @ Not found
    return;
  }
  if( referred_from_login() ){
    style_header("ZIP Archive Download");
    @ <form action='%R/zip'>
    cgi_query_parameters_to_hidden();
    @ <p>ZIP Archive named <b>%h(zName).zip</b> holding the content
    @ of check-in <b>%h(zRid)</b>:
    @ <input type="submit" value="Download" />
    @ </form>
    style_footer();
    return;
  rid = name_to_typed_rid(nRid?zRid:zName,"ci");
  if( rid==0 ){
    @ Not found
    return;
  }
  if( referred_from_login() ){
    style_header("ZIP Archive Download");
    @ <form action='%R/zip/%h(zName).zip'>
    cgi_query_parameters_to_hidden();
    @ <p>ZIP Archive named <b>%h(zName).zip</b> holding the content
    @ of check-in <b>%h(zRid)</b>:
    @ <input type="submit" value="Download" />
    @ </form>
    style_footer();
    return;
Added test/amend.test.
#
# Copyright (c) 2015 D. Richard Hipp
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Simplified BSD License (also
# known as the "2-Clause License" or "FreeBSD License".)
#
# This program is distributed in the hope that it will be useful,
# but without any warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose.
#
# Author contact information:
#   drh@hwaci.com
#   http://www.hwaci.com/drh/
#
############################################################################
#
# Tests for the "amend" command.
#

proc short_uuid {uuid {len 10}} {
  string range $uuid 0 $len-1
}

proc artifact_from_timeline {res var} {
  upvar $var artid
  regexp {(?x)[0-9]{2}(?::[0-9]{2}){2}\s+\[([0-9a-f]+)]} $res m artid
}

proc manifest_comment {comment} {
  string map [list { } {\\s} \n {\\n} \r {\\r}] $comment
}

proc uuid_from_commit {res var} {
  upvar $var UUID
  regexp {^New_Version: ([0-9a-f]{40})$} $res m UUID
}

proc uuid_from_branch {res var} {
  upvar $var UUID
  regexp {^New branch: ([0-9a-f]{40})$} $res m UUID
}

proc uuid_from_checkout {var} {
  global RESULT
  upvar $var UUID
  fossil status
  regexp {checkout:\s+([0-9a-f]{40})} $RESULT m UUID
}

# Make sure we are not in an open repository and initialize new repository
repo_init

########################################
# Setup: Add file and commit           #
########################################

if {![uuid_from_checkout UUIDINIT]} {
  test amend-checkout-failure false
  return
}
write_file datafile "data"
fossil add datafile
fossil commit -m "c1"
if {![uuid_from_commit $RESULT UUID]} {
  test amend-setup-failure false
  return
}

########################################
# Test: -branch                        #
########################################
set UUIDB UUIDB
write_file datafile "data.file"
fossil commit -m "c2"
if {![uuid_from_commit $RESULT UUIDB]} {
  test amend-branch.setup false
}
fossil amend $UUIDB -branch amended-branch
test amend-branch-1.1 {[regexp {tags:\s+amended-branch} $RESULT]}
fossil branch ls
test amend-branch-1.2 {[string first "* amended-branch" $RESULT] != -1}
fossil tag list
test amend-branch-1.3 {[string first amended-branch $RESULT] != -1}
fossil tag list --raw $UUIDB
test amend-branch-1.4 {[string first "branch=amended-branch" $RESULT] != -1}
test amend-branch-1.5 {[string first "sym-amended-branch" $RESULT] != -1}
fossil timeline -n 1
test amend-branch-1.6 {[string match {*Move*to*branch*amended-branch*} $RESULT]}

########################################
# Test: -bgcolor                       #
########################################
set tc 0
foreach {color result} {
  0 0
  a a
  abcdef #abcdef
  abc123 #abc123
  123efg 123efg
  abcdefg abcdefg
  abcdeg abcdeg
  blue blue
  acf #acf
  123 #123
  #1234 #1234
  1234 1234
  123456 #123456
} {
  incr tc
  fossil amend $UUID -bgcolor $color
  test amend-bgcolor-1.$tc.a {[string match "*uuid:*$UUID*" $RESULT]}
  fossil tag list --raw $UUID
  test amend-bgcolor-1.$tc.b {[string first "bgcolor=$result" $RESULT] != -1}
  fossil timeline -n 1
  test amend-bgcolor-1.$tc.c {
    [string match "*Change*background*color*to*\"$result\"*" $RESULT]
  }
  if {[artifact_from_timeline $RESULT artid]} {
    fossil artifact $artid
    test amend-bgcolor-1.$tc.d {
      [string match "*T +bgcolor $UUID $result*" $RESULT]
    }
  } else {
    if {$VERBOSE} { protOut "No artifact found in timeline output" }
    test amend-bgcolor-1.$tc.d false
  }
}
fossil amend $UUID -bgcolor {}
test amend-bgcolor-2.1 {[string match "*uuid:*$UUID*" $RESULT]}
fossil tag list --raw $UUID
test amend-bgcolor-2.2 {
  [string first "bgcolor=" $RESULT] == -1 &&
  [string first "bgcolor" $RESULT] != -1
}
fossil timeline -n 1
test amend-bgcolor-2.3 {[string match "*Cancel*background*color.*" $RESULT]}
if {[artifact_from_timeline $RESULT artid]} {
  fossil artifact $artid
  test amend-bgcolor-2.4 {[string match "*T -bgcolor $UUID*" $RESULT]}
} else {
  if {$VERBOSE} { protOut "No artifact found in timeline output" }
  test amend-bgcolor-2.4 false
}

########################################
# Test: -branchcolor                   #
########################################
set UUID2 UUID2
fossil branch new brclr $UUID
if {![uuid_from_branch $RESULT UUID2]} {
  test amend-branchcolor.setup false
}
fossil update $UUID2
fossil amend $UUID2 -branchcolor yellow
test amend-branchcolor-1.1 {[string match "*uuid:*$UUID2*" $RESULT]}
fossil tag ls --raw $UUID2
test amend-branchcolor-1.2 {[string first "bgcolor=yellow" $RESULT] != -1}
fossil timeline -n 1
test amend-branchcolor-1.3 {
  [string match {*Change*branch*background*color*to*"yellow".*} $RESULT]
}
if {[regexp {(?x)[0-9]{2}(?::[0-9]{2}){2}\s+\[([0-9a-f]+)]} $RESULT m artid]} {
  fossil artifact $artid
  test amend-branchcolor-1.4 {
    [string match "*T \*bgcolor $UUID2 yellow*" $RESULT]
  }
} else {
  if {$VERBOSE} { protOut "No artifact found in timeline output" }
  test amend-branchcolor-1.4 false
}

set UUIDN UUIDN
write_file datafile "brclr"
fossil commit -m "brclr"
if {![uuid_from_commit $RESULT UUIDN]} {
  test amend-branchcolor-propagating.setup false
}
write_file datafile "bc1"
fossil commit -m "mc1"
write_file datafile "bc2"
fossil commit -m "mc2"
fossil amend $UUIDN -branchcolor deadbe
test amend-branchcolor-2.1 {[string match "*uuid:*$UUIDN*" $RESULT]}
fossil tag ls --raw current
test amend-branchcolor-2.2 {[string first "bgcolor=#deadbe" $RESULT] != -1}
fossil timeline -n 1
test amend-branchcolor-2.3 {
  [string match {*Change*branch*background*color*to*"#deadbe".*} $RESULT]
}

########################################
# Test: -author                        #
########################################
fossil amend $UUID -author author-test
test amend-author-1.1 {[string match {*comment:*(user:*author-test)*} $RESULT]}
fossil tag ls --raw $UUID
test amend-author-1.2 {[string first "user=author-test" $RESULT] != -1}
fossil timeline -n 1
test amend-author-1.3 {[string match {*Change*user*to*"author-test".*} $RESULT]}

########################################
# Test: -date                          #
########################################
set timestamp [clock scan yesterday]
set date [clock format $timestamp -format "%Y-%m-%d" -gmt 1]
set time [clock format $timestamp -format "%H:%M:%S" -gmt 1]
set datetime "$date $time"
fossil amend $UUIDINIT -date $datetime
test amend-date-1.1 {[string match "*uuid:*$UUIDINIT*$datetime*" $RESULT]}
fossil tag ls --raw $UUIDINIT
test amend-date-1.2 {[string first "date=$datetime" $RESULT] != -1}
fossil timeline -n 1
test amend-date-1.3 {[string match "*Timestamp*$date*$time*" $RESULT]}
set badformats {
  "%+"
  "%Y-%m-%d %H:%M%:%S %Z"
  "%d/%m/%Y %H:%M%:%S %Z"
  "%d/%m/%Y %H:%M%:%S"
  "%d/%m/%Y"
}
set sc 0
foreach badformat $badformats {
  incr sc
  set datetime [clock format $timestamp -format $badformat -gmt 1]
  fossil amend $UUIDINIT -date $datetime -expectError
  test amend-date-2.$sc {[string first "YYYY-MM-DD HH:MM:SS" $RESULT] != -1}
}

########################################
# Test: -hide                          #
########################################
set UUIDH UUIDH
fossil revert
fossil update trunk
fossil branch new tohide current
if {![uuid_from_branch $RESULT UUIDH]} {
  test amend-hide-setup false
}
fossil amend $UUIDH -hide
test amend-hide-1.1 {[string match "*uuid:*$UUIDH*" $RESULT]}
fossil tag ls --raw $UUIDH
test amend-hide-1.2 {[string first "hidden" $RESULT] != -1}
fossil timeline -n 1
test amend-hide-1.3 {[string match {*Add*propagating*"hidden".*} $RESULT]}

########################################
# Test: -close                          #
########################################
set UUIDC UUIDC
fossil branch new cllf $UUID
if {![uuid_from_branch $RESULT UUIDC]} {
  test amend-close.setup false
}
fossil update $UUIDC
fossil amend $UUIDC -close
test amend-close-1.1.a {[string match "*uuid:*$UUIDC*" $RESULT]}
test amend-close-1.1.b {
  [string match "*comment:*Create*new*branch*named*\"cllf\"*" $RESULT]
}
fossil tag ls --raw $UUIDC
test amend-close-1.2 {[string first "closed" $RESULT] != -1}
fossil timeline -n 1
test amend-close-1.3 {[string match {*Marked*"Closed".*} $RESULT]}
write_file datafile "cllf"
fossil commit -m "should fail" -expectError
test amend-close-2 {[string first "closed leaf" $RESULT] != -1}

set UUID3 UUID3
fossil revert
fossil update trunk
write_file datafile "cb"
fossil commit -m "closed-branch" --branch "closebranch"
if {![uuid_from_commit $RESULT UUID3]} {
  test amend-close-3.setup false
}
write_file datafile "b1"
fossil commit -m "m1"
write_file datafile "b2"
fossil commit -m "m2"
fossil amend $UUID3 --close
test amend-close-3.1 {[string match "*uuid:*$UUID3*" $RESULT]}
fossil tag ls --raw current
test amend-close-3.2 {[string first "closed" $RESULT] != -1}
fossil timeline -n 1
test amend-close-3.3 {
  [string match "*Add*propagating*\"closed\".*" $RESULT]
}
write_file datafile "changed"
fossil commit -m "should fail" -expectError
test amend-close-3.4 {[string first "closed leaf" $RESULT] != -1}

########################################
# Test: -tag/-cancel                   #
########################################
set tagtests {
  tagged tagged
  {000000 lower Upper alpha 0alpha} {000000 0alpha Upper alpha lower}
}
set tc 0
foreach {tagt result} $tagtests {
  incr tc
  set tags {}
  set cancels {}
  set t1exp ""
  set t2exp "*"
  set t3exp "*"
  set t5exp "*"
  foreach tag $tagt { 
    lappend tags -tag $tag
    lappend cancels -cancel $tag
  }
  foreach res $result {
    append t1exp ", $res"
    append t2exp "sym-$res*"
    append t3exp "Add*tag*\"$res\".*"
    append t5exp "Cancel*tag*\"$res\".*"
  }
  eval fossil amend $UUID $tags
  test amend-tag-$tc.1 {[string match "*uuid:*$UUID*tags:*$t1exp*" $RESULT]}
  fossil tag ls --raw $UUID
  test amend-tag-$tc.2 {[string match $t2exp $RESULT]}
  fossil timeline -n 1
  test amend-tag-$tc.3 {[string match $t3exp $RESULT]}
  eval fossil amend $UUID $cancels
  test amend-tag-$tc.4 {![string match "*tags:*$t1exp*" $RESULT]}
  fossil timeline -n 1
  test amend-tag-$tc.5 {[string match $t5exp $RESULT]}
}

########################################
# Test: -comment                       #
########################################
proc prep-test {comment content} {
  global UUID RESULT

  fossil revert
  fossil update trunk
  write_file datafile $comment
  fossil commit -m $content
  if {![uuid_from_commit $RESULT UUID]} {
    set UUID ""
  }
}

proc test-comment {name UUID comment} {
  global VERBOSE RESULT

  test amend-comment-$name.1 {
    [string match "*uuid:*$UUID*comment:*$comment*" $RESULT]
  }
  fossil timeline -n 1
  if {[artifact_from_timeline $RESULT artid]} {
    fossil artifact $artid
    test amend-comment-$name.2 {
      [string match "*T +comment $UUID *[manifest_comment $comment]*" $RESULT]
    }
  } else {
    if {$VERBOSE} { protOut "No artifact found in timeline output: $RESULT" }
    test amend-comment-$name.2 false
  }
  fossil timeline -n 1
  test amend-comment-$name.3 {
    [string match "*[short_uuid $UUID]*Edit*check-in*comment.*" $RESULT]
  }
  fossil info $UUID
  test amend-comment-$name.4 {
    [string match "*uuid:*$UUID*comment:*$comment*" $RESULT]
  }
}

prep-test "revision 1" "revision 1"
fossil amend $UUID -comment "revised revision 1"
test-comment 1 $UUID "revised revision 1"

prep-test "revision 2" "revision 2"
fossil amend $UUID -m "revised revision 2 with -m"
test-comment 2 $UUID "revised revision 2 with -m"

prep-test "revision 3" "revision 3"
write_file commitmsg "revision 3 revised"
fossil amend $UUID -message-file commitmsg
test-comment 3 $UUID "revision 3 revised"

prep-test "revision 4" "revision 4"
write_file commitmsg "revision 4 revised with -M"
fossil amend $UUID -M commitmsg
test-comment 4 $UUID "revision 4 revised with -M"

prep-test "final comment" "final content"
if {[catch {exec which ed} result]} {
  if {$VERBOSE} { protOut "Install ed for interactive comment test: $result" }
  test-comment 5 $UUID "ed required for interactive edit"
} else {
  fossil settings editor "ed -s"
  set comment "interactive edited comment"
  fossil_maybe_answer "a\n$comment\n.\nw\nq\n" amend $UUID --edit-comment
  test-comment 5 $UUID $comment
}

########################################
# Test: NULL UUID                      #
########################################
fossil amend {} -close -expectError
test amend-null-uuid {$CODE && [string first "no such check-in" $RESULT] != -1}
Changes to test/clean.test.
fossil extra
test clean-5 {[normalize_result] eq {}}

###############################################################################

fossil undo
test clean-6 {[normalize_result] eq {NEW f2}}
test clean-7 {[read_file f2] eq "f2 line"}

###############################################################################

fossil extra
test clean-8 {[normalize_result] eq {f2}}







fossil extra
test clean-5 {[normalize_result] eq {}}

###############################################################################

fossil undo
test clean-6 {[normalize_result] eq {NEW    f2}}
test clean-7 {[read_file f2] eq "f2 line"}

###############################################################################

fossil extra
test clean-8 {[normalize_result] eq {f2}}

fossil extra
test clean-11 {[normalize_result] eq {f3}}

###############################################################################

fossil undo
test clean-12 {[normalize_result] eq {NEW f2}}
test clean-13 {[read_file f2] eq "f2 line"}
test clean-14 {[read_file f3] eq [string repeat ABCDEFGHIJK 1048576]}

###############################################################################

fossil extra
test clean-15 {[normalize_result] eq {f2






fossil extra
test clean-11 {[normalize_result] eq {f3}}

###############################################################################

fossil undo
test clean-12 {[normalize_result] eq {NEW    f2}}
test clean-13 {[read_file f2] eq "f2 line"}
test clean-14 {[read_file f3] eq [string repeat ABCDEFGHIJK 1048576]}

###############################################################################

fossil extra
test clean-15 {[normalize_result] eq {f2
fossil extra
test clean-17 {[normalize_result] eq {}}

###############################################################################

fossil undo
test clean-18 {[normalize_result] eq {NEW f2}}
test clean-19 {[read_file f2] eq "f2 line"}

###############################################################################

write_file f4 [string repeat KJIHGFEDCBA 1048576]
fossil extra
test clean-20 {[normalize_result] eq {f2






fossil extra
test clean-17 {[normalize_result] eq {}}

###############################################################################

fossil undo
test clean-18 {[normalize_result] eq {NEW    f2}}
test clean-19 {[read_file f2] eq "f2 line"}

###############################################################################

write_file f4 [string repeat KJIHGFEDCBA 1048576]
fossil extra
test clean-20 {[normalize_result] eq {f2
fossil extra
test clean-22 {[normalize_result] eq {f2
f4}}

###############################################################################

fossil undo
test clean-23 {[normalize_result] eq {nothing to undo}}

###############################################################################

# clean w/undo disabled, force, 1 file < 10MiB, 1 file > 10MiB
fossil clean --disable-undo --force
test clean-24 {[normalize_result] eq {}}

###############################################################################

fossil extra
test clean-25 {[normalize_result] eq {}}

###############################################################################

fossil undo
test clean-26 {[normalize_result] eq {nothing to undo}}

###############################################################################

write_file f5 "f5 line"
fossil extra
test clean-27 {[normalize_result] eq {f5}}






fossil extra
test clean-22 {[normalize_result] eq {f2
f4}}

###############################################################################

fossil undo -expectError
test clean-23 {[normalize_result] eq {nothing to undo}}

###############################################################################

# clean w/undo disabled, force, 1 file < 10MiB, 1 file > 10MiB
fossil clean --disable-undo --force
test clean-24 {[normalize_result] eq {}}

###############################################################################

fossil extra
test clean-25 {[normalize_result] eq {}}

###############################################################################

fossil undo -expectError
test clean-26 {[normalize_result] eq {nothing to undo}}

###############################################################################

write_file f5 "f5 line"
fossil extra
test clean-27 {[normalize_result] eq {f5}}
###############################################################################

fossil extra
test clean-29 {[normalize_result] eq {}}

###############################################################################

fossil undo
test clean-30 {[normalize_result] eq {nothing to undo}}

###############################################################################

fossil extra
test clean-31 {[normalize_result] eq {}}






###############################################################################

fossil extra
test clean-29 {[normalize_result] eq {}}

###############################################################################

fossil undo -expectError
test clean-30 {[normalize_result] eq {nothing to undo}}

###############################################################################

fossil extra
test clean-31 {[normalize_result] eq {}}
Changes to test/file1.test.
#   drh@hwaci.com
#   http://www.hwaci.com/drh/
#
############################################################################
#
# File utilities
#



proc simplify-name {testname args} {
  set i 1
  foreach {path result} $args {
    fossil test-simplify-name $path
    test simplify-name-$testname.$i {$::RESULT=="\[$path\] -> \[$result\]"}
    incr i






#   drh@hwaci.com
#   http://www.hwaci.com/drh/
#
############################################################################
#
# File utilities
#

repo_init

proc simplify-name {testname args} {
  set i 1
  foreach {path result} $args {
    fossil test-simplify-name $path
    test simplify-name-$testname.$i {$::RESULT=="\[$path\] -> \[$result\]"}
    incr i
simplify-name 106 a/b/../../../x/y ../x/y /a/b/../../../x/y /../x/y
simplify-name 107 a/./b/.././../x/y x/y a//.//b//..//.//..//x//y/// x/y

if {$::tcl_platform(os)=="Windows NT"} {
  simplify-name 108 //?/a:/a/b a:/a/b //?/UNC/a/b //a/b //?/ {}
  simplify-name 109 \\\\?\\a:\\a\\b a:/a/b \\\\?\\UNC\\a\\b //a/b \\\\?\\ {}
}




# Those directories are only needed for the testcase being able to "--chdir" to it.
file mkdir test1
file mkdir test1/test2

relative-name 100 . . . test1 [pwd] .. test1 [pwd]/ .. test1 [pwd]/test ../test
relative-name 101 test1/test2 [pwd] ../.. test1/test2 [pwd]/ ../.. test1/test2 [pwd]/test ../../test






simplify-name 106 a/b/../../../x/y ../x/y /a/b/../../../x/y /../x/y
simplify-name 107 a/./b/.././../x/y x/y a//.//b//..//.//..//x//y/// x/y

if {$::tcl_platform(os)=="Windows NT"} {
  simplify-name 108 //?/a:/a/b a:/a/b //?/UNC/a/b //a/b //?/ {}
  simplify-name 109 \\\\?\\a:\\a\\b a:/a/b \\\\?\\UNC\\a\\b //a/b \\\\?\\ {}
}

# This is needed because we are now running outside of the Fossil checkout.
file mkdir file1; set savedPwd [pwd]; cd file1

# Those directories are only needed for the testcase being able to "--chdir" to it.
file mkdir test1
file mkdir test1/test2

relative-name 100 . . . test1 [pwd] .. test1 [pwd]/ .. test1 [pwd]/test ../test
relative-name 101 test1/test2 [pwd] ../.. test1/test2 [pwd]/ ../.. test1/test2 [pwd]/test ../../test
absolute-tree-name 100 . . $dirname test1 [pwd] [pwd] test1 [pwd]/ $dirname/file1 test1 [pwd]/test $dirname/file1/test
absolute-tree-name 101 test1/test2 [pwd] $dirname/file1 test1/test2 [pwd]/ $dirname/file1 test1/test2 [pwd]/test $dirname/file1/test
absolute-tree-name 102 test1 [pwd]/test $dirname/file1/test . [pwd]/file1 $dirname/file1/file1 . [pwd]/file1/file2 $dirname/file1/file1/file2
absolute-tree-name 103 . [pwd] $dirname/file1

catch {file delete test1/test2}
catch {file delete test1}








absolute-tree-name 100 . . $dirname test1 [pwd] [pwd] test1 [pwd]/ $dirname/file1 test1 [pwd]/test $dirname/file1/test
absolute-tree-name 101 test1/test2 [pwd] $dirname/file1 test1/test2 [pwd]/ $dirname/file1 test1/test2 [pwd]/test $dirname/file1/test
absolute-tree-name 102 test1 [pwd]/test $dirname/file1/test . [pwd]/file1 $dirname/file1/file1 . [pwd]/file1/file2 $dirname/file1/file1/file2
absolute-tree-name 103 . [pwd] $dirname/file1

catch {file delete test1/test2}
catch {file delete test1}

if {[info exists savedPwd]} {cd $savedPwd; unset savedPwd}
Added test/json.test.
#
# Copyright (c) 2016 D. Richard Hipp
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Simplified BSD License (also
# known as the "2-Clause License" or "FreeBSD License".)
#
# This program is distributed in the hope that it will be useful,
# but without any warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose.
#
# Author contact information:
#   drh@hwaci.com
#   http://www.hwaci.com/drh/
#
############################################################################
#
# Test JSON Support
#

# Make sure we have a build with the json command at all and that it
# is not stubbed out. This assumes the current (as of 2016-01-27)
# practice of eliminating all trace of the fossil json command when
# not configured. If that changes, these conditions might not prevent
# the rest of this file from running.
fossil test-th-eval "hasfeature json"

if {$::RESULT ne "1"} then {
  puts "Fossil was not compiled with JSON support."; return
}

# We need a JSON parser to effectively test the JSON produced by
# fossil. It looks like the one from tcllib is exactly what we need.
# On ActiveTcl, add it with teacup. On other platforms, YMMV.
# teacup install json
# teacup install json::write
package require json

proc json2dict {txt} {
  set rc [catch {::json::json2dict $txt} result options]
  if {$rc != 0} {
    protOut "JSON ERROR: $result"
    return {}
  }
  return $result
}
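
# [Editor's illustrative sketch -- not part of this check-in.]  A quick,
# self-contained look at what json2dict hands back for a Fossil-style
# response envelope.  The sample text below is invented; only the
# tcllib ::json::json2dict call wrapped above (and the test suite's
# protOut helper) is assumed.
set sampleEnvelope {
  {"fossil":"0000000000000000000000000000000000000000",
   "timestamp":1456000000,
   "command":"HAI",
   "payload":{"jsonApiVersion":20120713}}
}
set sampleDict [json2dict $sampleEnvelope]
if {$sampleDict ne ""} {
  # Nested values are reachable with a multi-key [dict get]:
  protOut "sample jsonApiVersion: [dict get $sampleDict payload jsonApiVersion]"
}
unset sampleEnvelope sampleDict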

# Also check that the JSON itself smells OK and has the expected API error code in it.
fossil json -expectError
set JR [json2dict $RESULT]
if {$JR eq ""} {
  puts "Fossil was not compiled with JSON support (bad JSON)."; return
}
test json-1 {[dict exists $JR resultCode]
             && [dict get $JR resultCode] eq "FOSSIL-4102"}

# Use the CLI interface to execute a JSON command. Sets the global
# RESULT to the response text, and JR to a Tcl dict conversion of the
# response body.
#
# Returns "200" or "500".
proc fossil_json {args} {
  global RESULT JR
  uplevel 1 fossil json {*}$args
  set JR [json2dict $RESULT]
  return "200"
}

# Use the HTTP interface to GET a JSON API URL. Sets the globals
# RESULT to the HTTP response body, and JR to a Tcl dict conversion of
# the response body.
#
# Returns the status code from the HTTP header.
proc fossil_http_json {url {cookie "Muppet=Monster"} args} {
  global RESULT JR
  set request "GET $url HTTP/1.1\r\nHost: localhost\r\nUser-Agent: Fossil-http-json\r\nCookie: $cookie"
  set RESULT [fossil_maybe_answer $request http {*}$args]
  regexp {(?w)(.*)^\s*$(.*)} $RESULT dummy head body
  regexp {^HTTP\S+\s+(\d\d\d)\s+(.*)$} $head dummy status msg
  if {$status eq "200"} {
    set JR [json2dict $body]
  }
  return $status
}
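
# [Editor's illustrative sketch -- not part of this check-in.]  The
# header parsing used above, exercised on a made-up response string so
# the two regexps are easier to follow; nothing is sent over HTTP.
set demoResponse "HTTP/1.0 200 OK\r\nContent-Type: application/json\r\n\r\n\{\"fossil\":\"demo\"\}"
# The first regexp splits the headers from the body at the first blank
# line; the second pulls the numeric status code out of the status line.
regexp {(?w)(.*)^\s*$(.*)} $demoResponse demoDummy demoHead demoBody
regexp {^HTTP\S+\s+(\d\d\d)\s+(.*)$} $demoHead demoDummy demoStatus demoMsg
protOut "demo status: $demoStatus" ;# prints: demo status: 200
unset demoResponse demoDummy demoHead demoBody demoStatus demoMsg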


# Use the HTTP interface to POST a JSON API URL. Sets the globals
# RESULT to the HTTP response body, and JR to a Tcl dict conversion of
# the response body.
#
# Returns the status code from the HTTP header.
proc fossil_post_json {url data {cookie "Muppet=Monster"} args} {
  global RESULT JR

  # set up a full GET or POST HTTP request
  set len [string length $data]
  if {$len > 0} {
    set request [subst {POST $url HTTP/1.0\r
Host: localhost\r
User-Agent: Fossil-Test\r
Cookie: $cookie\r
Content-Type: application/json
Content-Length $len
\r
$data}]
  } else {
    set request [subst {GET $url HTTP/1.0\r
Host: localhost\r
User-Agent: Fossil-Test\r
Cookie: $cookie\r
\r
}]
  }

  # handle the actual request
  flush stdout
  #exec $fossilexe
  set RESULT [fossil_maybe_answer $request http {*}$args]

  # separate HTTP headers from body
  regexp {(?w)(.*)^\s*$(.*)} $RESULT dummy head body
  regexp {^HTTP\S+\s+(\d\d\d)\s+(.*)$} $head dummy status msg
  if {$status eq "200"} {
    if {[string length $body] > 0} {
      set JR [json2dict $body]
    } else {
      set JR ""
    }
  }
  return $status
}
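
# [Editor's illustrative sketch -- not part of this check-in.]  What the
# POST request assembled above looks like for a tiny payload.  This is
# pure string construction; nothing is handed to the http command, and
# the endpoint and payload are made up.
set demoPayload {{"command":"HAI"}}
set demoRequest [subst {POST /json/HAI HTTP/1.0\r
Host: localhost\r
User-Agent: Fossil-Test\r
Content-Type: application/json\r
Content-Length: [string length $demoPayload]\r
\r
$demoPayload}]
protOut $demoRequest
unset demoPayload demoRequest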


# Inspect a dict for keys it must have and keys it must not have
proc test_dict_keys {testname D okfields badfields} {
  set i 1
  foreach f $okfields {
    test "$testname-$i" {[dict exists $D $f]}
    incr i
  }
  foreach f $badfields {
    test "$testname-$i" {![dict exists $D $f]}
    incr i
  }
}
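
# [Editor's illustrative sketch -- not part of this check-in.]  The key
# checks above reduce to [dict exists]; for a made-up dict:
set demoD [dict create name anonymous capabilities hmnc]
protOut "has name?        [dict exists $demoD name]"        ;# 1
protOut "has noSuchField? [dict exists $demoD noSuchField]" ;# 0
unset demoD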

# Inspect the envelope part of a returned JSON structure to confirm
# that it has specific fields and that it lacks specific fields.
proc test_json_envelope {testname okfields badfields} {
  test_dict_keys $testname $::JR $okfields $badfields
}

# Inspect the envelope of a normal successful result
proc test_json_envelope_ok {testname} {
  test_json_envelope $testname [concat fossil timestamp command procTimeUs \
  procTimeMs payload] [concat resultCode resultText]
}

# Inspect the payload of a successful result to confirm that it has
# specific fields and that it lacks specific fields.
proc test_json_payload {testname okfields badfields} {
  test_dict_keys $testname [dict get $::JR payload] $okfields $badfields
}

#### VERSION AKA HAI

# The JSON API generally assumes we have a repository, so let it have one.
repo_init

# Check for basic envelope fields in the result with an error
fossil_json -expectError
test_json_envelope json-enverr [concat resultCode fossil timestamp \
resultText command procTimeUs procTimeMs] {}
test json-enverr-rc-1 {[dict get $JR resultCode] eq "FOSSIL-3002"}


# Check for basic envelope fields in the result with a successful
# command
set HAIfields [concat manifestUuid manifestVersion manifestDate \
manifestYear releaseVersion releaseVersionNumber \
resultCodeParanoiaLevel jsonApiVersion]

fossil_json HAI
test_json_envelope_ok json-HAI
test_json_payload json-HAI $HAIfields {}
test json-HAI-api {[dict get $JR payload jsonApiVersion] >= 20120713}

# Check for basic envelope fields in a HTTP result with a successful
# command
fossil_http_json /json/HAI
test_json_envelope_ok json-http-HAI
test_json_payload json-http-HAI $HAIfields {}
test json-http-HAI-api {[dict get $JR payload jsonApiVersion] >= 20120713}

fossil_json version
test_json_envelope_ok json-version
test_json_payload json-version $HAIfields {}
test json-version-api {[dict get $JR payload jsonApiVersion] >= 20120713}

#### ARTIFACT

# sha1 of 0 bytes and a file to match in a commit
set UUID_empty da39a3ee5e6b4b0d3255bfef95601890afd80709
write_file empty ""
fossil add empty
fossil ci -m "empty file"

# json artifact (checkin)
fossil_json [concat artifact tip]
test_json_envelope_ok json-artifact-checkin-env
test json-artifact-checkin {[dict get $JR payload type] eq "checkin"}
test_json_payload json-artifact \
[concat type uuid isLeaf timestamp user comment parents tags files] {}

# json artifact (file)
fossil_json [concat artifact $UUID_empty]
test_json_envelope_ok json-artifact-file-env
test json-artifact-file {[dict get $JR payload type] eq "file"}
test_json_payload json-artifact [concat type uuid size checkins] {}

# json artifact (wiki)
fossil wiki create Empty <<"-=BLANK=-"
fossil_json wiki get Empty
test json-wiki-get {[dict get $JR payload name] eq "Empty"}
set uuid [dict get $JR payload uuid]
fossil_json artifact $uuid
test_json_envelope_ok json-artifact-wiki-env
test json-artifact-wiki {[dict get $JR payload type] eq "wiki"}
test_json_payload json-artifact-wiki [list type uuid artifact] {}
set artifact [dict get $JR payload artifact]
test_dict_keys json-artifact-wiki-artifact $artifact \
  [list name uuid user timestamp size] {}
# name, uuid, parent?, user, timestamp, size?, content?


#### AUTHENTICATION
fossil_json anonymousPassword
test_json_envelope_ok json-anonymousPassword-env
test_json_payload json-anonymousPassword {seed password} {}
set seed [dict get $JR payload seed]
set pass [dict get $JR payload password]

write_file anon-1 [subst {
{
  "command":"login",
  "payload":{
    "name":"anonymous",
    "anonymousSeed":$seed,
    "password":"$pass"
  }
}
}]
fossil_json --json-input anon-1
test_json_envelope_ok json-login-a-env
test_json_payload json-login-a {authToken name capabilities loginCookieName} {}
set AuthAnon [dict get $JR payload]
proc test_hascaps {testname need caps} {
  foreach n [split $need {}] {
    test $testname-$n {[string first $n $caps] >= 0}
  }
}
test_hascaps json-login-c "hmnc" [dict get $AuthAnon capabilities]

fossil user new U1 User-1 Uone
fossil user capabilities U1 s
write_file u1 {
{
  "command":"login",
  "payload":{
    "name":"U1",
    "password":"Uone"
  }
}
}
fossil_json --json-input u1
test_json_envelope_ok json-login-u1-env
test_json_payload json-login-u1 {authToken name capabilities loginCookieName} {}
set AuthU1 [dict get $JR payload]
test_hascaps json-login-c "s" [dict get $AuthU1 capabilities]

set U1Cookie [dict get $AuthU1 loginCookieName]=[regsub -all {[/]} [dict get $AuthU1 authToken] {%2F} ]
set AnonCookie [dict get $AuthAnon loginCookieName]=[regsub -all {[/]} [dict get $AuthAnon authToken] {%2F} ]

# json cap
# The CLI user has all rights, and no auth token affects that. This
# is consistent with the rest of the fossil CLI, and with the
# pragmatic argument that using the CLI implies physical access to
# the repo file itself, which can be tampered with using many tools,
# including raw SQLite, which will also ignore authentication.
write_file anon-2 [subst {
  {"command":"cap",
   "authToken":"[dict get $AuthAnon authToken]"
  }
}]
fossil_json --json-input anon-2
test_json_envelope_ok json-cap-env
test json-cap-CLI {[dict get $JR payload permissionFlags setup]}

# json cap via POST with authToken in request envelope
set anon2 [read_file anon-2]
fossil_post_json "/json/cap" $anon2
test json-cap-POSTenv-env-0 {[string length $JR] > 0}
test_json_envelope_ok json-cap-POSTenv-env
test json-cap-POSTenv-name {[dict get $JR payload name] eq "anonymous"} knownBug
test json-cap-POSTenv-notsetup {![dict get $JR payload permissionFlags setup]}


# json cap via GET with authToken in Cookie header
fossil_post_json "/json/cap" {} $AnonCookie
test json-cap-GETcookie-env-0 {[string length $JR] > 0}
test_json_envelope_ok json-cap-GETcookie-env
test json-cap-GETcookie-name {[dict get $JR payload name] eq "anonymous"}
test json-cap-GETcookie-notsetup {![dict get $JR payload permissionFlags setup]}


# json cap via GET with authToken in a parameter
fossil_post_json "/json/cap?authToken=[dict get $AuthAnon authToken]" {}
test json-cap-GETcookie-env-0 {[string length $JR] > 0}
test_json_envelope_ok json-cap-GETcookie-env
test json-cap-GETcookie-name {[dict get $JR payload name] eq "anonymous"}
test json-cap-GETcookie-notsetup {![dict get $JR payload permissionFlags setup]}


# whoami
# via CLI with no auth token supplied
fossil_json whoami
test_json_envelope_ok json-whoami-cli-env
test_json_payload json-whoami-cli {name capabilities} {}
test json-whoami-cli-name {[dict get $JR payload name] eq "nobody"}
test_hascaps json-whoami-cli-cap "gjorz" [dict get $JR payload capabilities]

#### BRANCHES
# json branch list
fossil_json branch list
test_json_envelope_ok json-branch-list-env
test_json_payload json-branch-list {range current branches} {}
test json-branch-list-cur {[dict get $JR payload current] eq "trunk"}
test json-branch-list-cnt {[llength [dict get $JR payload branches]] == 1}
test json-branch-list-val {[dict get $JR payload branches] eq "trunk"}

# json branch create
fossil_json branch create alpha --basis trunk
test_json_envelope_ok json-branch-create-env
test_json_payload json-branch-create {name basis rid uuid isPrivate} {}


#### CONFIG
# json config get AREA
# AREAs are skin ticket project all skin-backup
foreach a [list skin ticket project all skin-backup] {
  fossil_json config get $a
  test_json_envelope_ok json-config-$a-env
  # payload depends on specific area and may be completely empty
}

#### DIFFS
# json diff v1 v2

write_file fish {
ABCD goldfish
}
fossil add fish
fossil ci -m "goldfish"
fossil_json finfo fish
set fishHist [dict get $JR payload checkins]
set fishV1 [dict get [lindex $fishHist 0] uuid]

write_file fish {
ABCD goldfish
LMNO goldfish
}
fossil ci -m "goldfish"
fossil_json finfo fish
set fishHist [dict get $JR payload checkins]
set fishV2 [dict get [lindex $fishHist 0] uuid]

test fossil-diff-setup {$fishV1 ne $fishV2}
fossil_json diff $fishV1 $fishV2
test_json_envelope_ok json-diff-env
test_json_payload json-diff {from to diff} {}
test json-diff-v1 {[dict get $JR payload from] eq $fishV1}
test json-diff-v2 {[dict get $JR payload to] eq $fishV2}
set diff [dict get $JR payload diff]
test json-diff-diff {[string first "+LMNO goldfish" $diff] >= 0}
protOut [dict get $JR payload diff]


#### DIRECTORY LISTING
# json dir DIRNAME
fossil_json dir
test_json_envelope_ok json-dir-env
test_json_payload json-dir {name entries} {}

#### FILE INFO
# json finfo FILENAME
fossil_json finfo empty
test_json_envelope_ok json-finfo-env
test_json_payload json-finfo {name checkins} {}

#### QUERY
# json query SQLCODE
fossil_json query {"SELECT * FROM reportfmt"}
test_json_envelope_ok json-query-env
test_json_payload json-query {columns rows} {}

#### STATS
# json stat
fossil_json stat
test_json_envelope_ok json-stat-env
test_json_payload json-stat {repositorySize ageDays ageYears projectCode compiler sqlite} \
{blobCount deltaCount uncompressedArtifactSize averageArtifactSize maxArtifactSize \
compressionRatio checkinCount fileCount wikiPageCount ticketCount}

fossil_json stat -f
test_json_envelope_ok json-stat-env
test_json_payload json-stat {repositorySize \
blobCount deltaCount uncompressedArtifactSize averageArtifactSize maxArtifactSize \
compressionRatio checkinCount fileCount wikiPageCount ticketCount \
ageDays ageYears projectCode compiler sqlite} {}


#### STATUS
# NOTE: Local checkout required
# json status
fossil_json status
test_json_envelope_ok json-status-env
test_json_payload json-status {repository localRoot checkout files errorCount} {}

#### TAGS

# json tag add NAME CHECKIN VALUE
fossil_json tag add blue trunk green
test_json_envelope_ok json-tag-add-env
test_json_payload json-tag-add {name value propagate raw appliedTo} {}


# json tag cancel NAME CHECKIN
fossil_json tag add cancel alpha
test_json_envelope_ok json-tag-cancel-env
# DOCBUG? Doc says no payload.
test_json_payload json-tag-cancel {name value propagate raw appliedTo} {}

# json tag find NAME
fossil_json tag find alpha
test_json_envelope_ok json-tag-find-env
test_json_payload json-tag-find {name raw type limit artifacts} {}
test json-tag-find-count {[llength [dict get $JR payload artifacts]] >= 1}

# json tag list CHECKIN
fossil_json tag list
test_json_envelope_ok json-tag-list-env
test_json_payload json-tag-list {raw includeTickets tags} {}
test json-tag-list-count {[llength [dict get $JR payload tags]] >= 2}


#### TICKETS
# API Docs say not yet defined, so it isn't quite fair to mark this
# category as TODO for the test cases...

#### TICKET REPORTS

# json report get NUMBER
fossil_json report get 1
test_json_envelope_ok json-report-get-env
test_json_payload json-report-get {report owner title timestamp columns sqlCode} {}

# json report list
fossil_json report list
test_json_envelope_ok json-report-list-env
#test_json_payload json-report-list {raw includeTickets tags} {}
test json-report-list-count {[llength [dict get $JR payload]] >= 1}


# json report run NUMBER
fossil_json report run 1
test_json_envelope_ok json-report-run-1-env
test_json_payload json-report-list {report title sqlcode columnNames tickets} {}
test json-report-list-count {[llength [dict get $JR payload columnNames]] >= 7}
test json-report-list-count {[llength [dict get $JR payload tickets]] >= 0}


#### TIMELINE

# json timeline checkin
fossil_json timeline checkin
test_json_envelope_ok json-timeline-checkin-env
test_json_payload json-timeline-checkin {limit timeline} {}
set i 0
foreach t [dict get $JR payload timeline] {
  # parents appears only for entries that have a parent
  # files appears only if requested by the --files parameter
  test_dict_keys json-timeline-checkin-$i $t {type uuid timestamp comment user isLeaf tags} {}
  incr i
}

# json timeline ci
# removed from documentation
#fossil_json timeline ci
#test json-timeline-ci {[dict get $JR resultCode] ne "FOSSIL-1102"} knownBug
#test_json_payload json-timeline-ci {limit timeline} {}

# json timeline ticket
fossil_json timeline ticket
test_json_envelope_ok json-timeline-ticket-env
test_json_payload json-timeline-ticket {limit timeline} {}

# json timeline wiki
fossil_json timeline wiki
test_json_envelope_ok json-timeline-wiki-env
test_json_payload json-timeline-wiki {limit timeline} {}


#### USER MANAGEMENT

# json user get
foreach u [list nobody anonymous reader developer U1] {
  fossil_json user get $u
  test_json_envelope_ok json-user-get-$u-env
  test_json_payload json-user-get-$u {uid name capabilities info timestamp} {}
}

# json user list
fossil_json user list
test_json_envelope_ok json-user-list-env
set i 0
foreach u [dict get $JR payload] {
  test_dict_keys json-user-list-$i $u {uid name capabilities info timestamp} {}
  incr i
}

# json user save
fossil_json user save --uid -1 --name U2 --password Utwo
test_json_envelope_ok json-user-save-env
test_json_payload json-user-save {uid name capabilities info timestamp} {}


# DOCBUG? Doc says payload is "same as /json/user/get" but actual
# result was an array of one user similar to /json/user/list.
#set i 0
#foreach u [dict get $JR payload] {
#  test_dict_keys json-user-save-$i $u {uid name capabilities info timestamp} {}
#  incr i
#}
#test json-user-save-count {$i == 1}



#### WIKI

# wiki list
fossil_json wiki list
test_json_envelope_ok json-wiki-list-env
set pages  [dict get $JR payload]
test json-wiki-1 {[llength $pages] == 1}
test json-wiki-2 {[lindex  $pages 0] eq "Empty"}
fossil_json wiki list --verbose
set pages  [dict get $JR payload]
test json-wiki-verbose-1 {[llength $pages] == 1}
test_dict_keys json-wiki-verbose-pages [lindex $pages 0] [list name uuid user timestamp size] {}

# wiki get
fossil_json wiki get Empty
test_json_envelope_ok json-wiki-get-env
# this page has only one version, so no parent should be listed
test_json_payload json-wiki-get [list name uuid user timestamp size content] [list parent]


# wiki create
# requires an authToken? Not from CLI.

write_file req.json {
  {
    "command":"wiki/create",
    "payload":{
      "name":"Page2",
      "content":"Lorem ipsum dolor sic amet."
    }
  }
}
fossil_json --json-input req.json
test_json_envelope_ok json-wiki-create-env
fossil_json wiki get Page2
test_json_envelope_ok json-wiki-create-get-env
test_json_payload json-wiki-save-get [list name uuid user timestamp size content] {parent}
set uuid1 [dict get $JR payload uuid]

# wiki save

write_file req2.json {
  {
    "command":"wiki/save",
    "payload":{
      "name":"Page2",
      "content":"Lorem ipsum dolor sic amet.\nconsectetur adipisicing elit."
    }
  }
}
fossil_json --json-input req2.json
test_json_envelope_ok json-wiki-save-env
fossil_json wiki get Page2
test_json_envelope_ok json-wiki-save-get-env
test_json_payload json-wiki-save-get [list name uuid user timestamp size parent content] {}
set uuid2 [dict get $JR payload uuid]
test json-wiki-save-parent {[dict get $JR payload parent] eq $uuid1}

# wiki diff

fossil_json wiki diff $uuid1 $uuid2
test_json_envelope_ok json-wiki-diff-env
test_json_payload json-wiki-diff [list v1 v2 diff] {}
test json-wiki-diff-v1 {[dict get $JR payload v1] eq $uuid1}
test json-wiki-diff-v1 {[dict get $JR payload v2] eq $uuid2}
set diff [dict get $JR payload diff]
test json-wiki-diff-diff {[string first "+consectetur adipisicing elit" $diff] >= 0}
#puts [dict get $JR payload diff]

# wiki preview
#
# takes a string in fossil wiki markup and returns an HTML fragment.
# This command does not make use of the actual wiki content (much?)
# at all.
write_file req3.json {
  {
    "command":"wiki/preview",
    "payload":"Lorem ipsum dolor sic amet.\nconsectetur adipisicing elit."
  }
}
fossil_json --json-input req3.json
test_json_envelope_ok json-wiki-preview-env
set pv [dict get $JR payload]
test json-wiki-preview-out-1 {[string first "<p>Lorem ipsum" $pv] == 0}
test json-wiki-preview-out-2 {[string last "<p>" $pv] == 0}

#### UNAVOIDABLE MISC

# json g
fossil_json g
test_json_envelope_ok json-g-env
#puts [llength [dict keys [dict get $JR payload]]]
test json-g-g {[llength [dict keys [dict get $JR payload]]] >= 60};# 64 on my PC

# json rebuild
fossil_json rebuild
test_json_envelope json-rebuild-env [concat fossil timestamp command procTimeUs \
  procTimeMs] [concat payload resultCode resultText]

# json resultCodes
fossil_json resultCodes
test_json_envelope_ok json-resultCodes-env
set codes [dict get $JR payload]
test json-resultCodes-codes-1 {[llength $codes] >= 35} ;# count as of API 20120713
# foreach c $codes {
#   puts [dict values $c]
# }
foreach r $codes {
  protOut "# [dict get $r resultCode] [dict get $r cSymbol]\n#     [dict get $r description]"
}



#### From the API Docs

# Reminder to self: in March 2012 i saw a corner-case which returns
# HTML output. To reproduce: chmod 444 REPO, then submit a request
# which writes something (timeline creates a temp table). The "repo
# is not writable" error comes back as HTML. i don't know if the
# error happens before we have made the determination that the app is
# in JSON mode or if the error handling is incorrectly not
# recognizing JSON mode.
#
#repo_init x.fossil
#catch {exec chmod 444 .rep.fossil}; # Unix. What about Win?
fossil_http_json /json/timeline/checkin $U1Cookie
test json-ROrepo-1-1 {$CODE == 0}
test json-ROrepo-1-2 {[regexp {\}\s*$} $RESULT]}
test json-ROrepo-1-3 {![regexp {SQLITE_[A-Z]+:} $RESULT]}
test_json_envelope_ok json-http-timeline1
protOut "chmod 444 repo"
catch {exec chmod 444 .rep.fossil}; # Unix
catch {exec attrib +r .rep.fossil}; # Windows
fossil_http_json /json/timeline/checkin $U1Cookie -expectError
test json-ROrepo-2-1 {$CODE != 0}
test json-ROrepo-2-2 {[regexp {\}\s*$} $RESULT]} knownBug
test json-ROrepo-2-3 {![regexp {SQLITE_[A-Z]+:} $RESULT]} knownBug
#test_json_envelope_ok json-http-timeline2
catch {exec attrib -r .rep.fossil}; # Windows
catch {exec chmod 666 .rep.fossil}; # Unix


#### Result Codes
# Test cases designed to stimulate each (documented) error code.

# FOSSIL-0000
# Not returned by any command. We generally verify that in the
# test_json_envelope_ok command by verifying that the resultCode
# field is not present. Should any JSON endpoint begin to use the
# range reserved for non-fatal warnings, those tests will fail.
#
# Notice that code is not included in the list returned from
# /json/resultCodes.


# FOSSIL-1000 FSL_JSON_E_GENERIC
#     Generic error

# FOSSIL-1101 FSL_JSON_E_INVALID_REQUEST
#     Invalid request
write_file e1101.json {
  ["command","nope"]
}
fossil_json --json-input e1101.json -expectError
test json-RC-1101-array-CLI-exit {$CODE != 0}
test_json_envelope json-RC-1101-array-env {fossil timestamp command procTimeUs \
procTimeMs resultCode resultText} {payload}
test json-RC-1101-array-code {[dict get $JR resultCode] eq "FOSSIL-1101"}

write_file e1101.json {
  "Not really a command but more of a suggestion"
}
fossil_json --json-input e1101.json -expectError
test json-RC-1101-string-CLI-exit {$CODE != 0}
test_json_envelope json-RC-1101-string-env {fossil timestamp command procTimeUs \
procTimeMs resultCode resultText} {payload}
test json-RC-1101-string-code {[dict get $JR resultCode] eq "FOSSIL-1101"}




# FOSSIL-1102 FSL_JSON_E_UNKNOWN_COMMAND
#     Unknown command or subcommand
fossil_json NoSuchEndpoint -expectError
test json-RC-1102-CLI-exit {$CODE != 0}
test_json_envelope json-RC-1102-env {fossil timestamp command procTimeUs \
procTimeMs resultCode resultText} {payload}
test json-RC-1102-code {[dict get $JR resultCode] eq "FOSSIL-1102"}

write_file e1102.json {
  {
    "command":"no/such/endpoint"
  }
}
fossil_json --json-input e1102.json -expectError
test json-env-RC-1102-CLI-exit {$CODE != 0}
test_json_envelope json-env-RC-1102-env {fossil timestamp command procTimeUs \
procTimeMs resultCode resultText} {payload}
test json-env-RC-1102-code {[dict get $JR resultCode] eq "FOSSIL-1102"}


# FOSSIL-1103 FSL_JSON_E_UNKNOWN
#     Unknown error

write_file bad.sql  {
CREATE TABLE spam(a integer, b text);
}
exec $::fossilexe sqlite3 --no-repository bad.fossil <bad.sql
#fossil_json HAI -R bad.fossil -expectError

# FOSSIL-1104 FSL_JSON_E_TIMEOUT
#     Timeout reached
# FOSSIL-1105 FSL_JSON_E_ASSERT
#     Assertion failed
# FOSSIL-1106 FSL_JSON_E_ALLOC
#     Resource allocation failed
# FOSSIL-1107 FSL_JSON_E_NYI
#     Not yet implemented
# FOSSIL-1108 FSL_JSON_E_PANIC
#     x
# FOSSIL-1109 FSL_JSON_E_MANIFEST_READ_FAILED
#     Reading artifact manifest failed
# FOSSIL-1110 FSL_JSON_E_FILE_OPEN_FAILED
#     Opening file failed

# FOSSIL-2000 FSL_JSON_E_AUTH
#     Authentication error
# FOSSIL-2001 FSL_JSON_E_MISSING_AUTH
#     Authentication info missing from request
# FOSSIL-2002 FSL_JSON_E_DENIED
#     Access denied
# FOSSIL-2003 FSL_JSON_E_WRONG_MODE
#     Request not allowed (wrong operation mode)
# FOSSIL-2100 FSL_JSON_E_LOGIN_FAILED
#     Login failed
# FOSSIL-2101 FSL_JSON_E_LOGIN_FAILED_NOSEED
#     Anonymous login attempt was missing password seed
# FOSSIL-2102 FSL_JSON_E_LOGIN_FAILED_NONAME
#     Login failed - name not supplied
# FOSSIL-2103 FSL_JSON_E_LOGIN_FAILED_NOPW
#     Login failed - password not supplied
# FOSSIL-2104 FSL_JSON_E_LOGIN_FAILED_NOTFOUND
#     Login failed - no match found

# FOSSIL-3000 FSL_JSON_E_USAGE
#     Usage error
# FOSSIL-3001 FSL_JSON_E_INVALID_ARGS
#     Invalid argument(s)

# FOSSIL-3002 FSL_JSON_E_MISSING_ARGS
#     Missing argument(s)
write_file e3002.json {
  {"color":"yellow",
   "really":"no, blue",
   "number":42
  }
}
fossil_json --json-input e3002.json -expectError
test json-RC-3002-strange-CLI-exit {$CODE != 0}
test_json_envelope json-RC-3002-strange-env {fossil timestamp command procTimeUs \
procTimeMs resultCode resultText} {payload}
test json-RC-3002-strange-code {[dict get $JR resultCode] eq "FOSSIL-3002"}


# FOSSIL-3003 FSL_JSON_E_AMBIGUOUS_UUID
#     Resource identifier is ambiguous
# FOSSIL-3004 FSL_JSON_E_UNRESOLVED_UUID
#     Provided uuid/tag/branch could not be resolved
# FOSSIL-3005 FSL_JSON_E_RESOURCE_ALREADY_EXISTS
#     Resource already exists
# FOSSIL-3006 FSL_JSON_E_RESOURCE_NOT_FOUND
#     Resource not found

# FOSSIL-4000 FSL_JSON_E_DB
#     Database error
# FOSSIL-4001 FSL_JSON_E_STMT_PREP
#     Statement preparation failed
# FOSSIL-4002 FSL_JSON_E_STMT_BIND
#     Statement parameter binding failed
# FOSSIL-4003 FSL_JSON_E_STMT_EXEC
#     Statement execution/stepping failed
# FOSSIL-4004 FSL_JSON_E_DB_LOCKED
#     Database is locked
# FOSSIL-4101 FSL_JSON_E_DB_NEEDS_REBUILD
#     Fossil repository needs to be rebuilt

# FOSSIL-4102 FSL_JSON_E_DB_NOT_FOUND
#     Fossil repository db file could not be found.
fossil close
fossil_json HAI -expectError
test json-RC-4102-CLI-exit {$CODE != 0}
test_json_envelope json-RC-1102-env {fossil timestamp command procTimeUs \
procTimeMs resultCode resultText} {payload}
test json-1 {[dict get $JR resultCode] eq "FOSSIL-4102"}
fossil open .rep.fossil

# FOSSIL-4103 FSL_JSON_E_DB_NOT_VALID
#     Fossil repository db file is not valid.
write_file nope.fossil {
This is not a fossil repo. It ought to be a SQLite db with a well-known schema,
but it is actually just a block of text.
}
Changes to test/merge5.test.
    test merge5-$testid 0
  } else {
    test merge5-$testid 1
  }    
}

catch {exec $::fossilexe info} res
puts res=$res
if {![regexp {use --repository} $res]} {
  puts stderr "Cannot run this test within an open checkout"
  return
}
#
# Fossil will write data to $HOME when running 'fossil open' here.
# We don't want to clutter the $HOME of the test caller.
set env(HOME) [pwd]

# Construct a test repository
#
exec sqlite3 m5.fossil <$testdir/${testfile}_repo.sql
fossil rebuild m5.fossil
fossil open m5.fossil
fossil user default drh --user drh
fossil update baseline
checkout-test 10 {
  da5c8346496f3421cb58f84b6e59e9531d9d424d  one.txt
  ed24d19d726d173f18dbf4a9a0f8514daa3e3ca4  three.txt






    test merge5-$testid 0
  } else {
    test merge5-$testid 1
  }    
}

catch {exec $::fossilexe info} res

if {![regexp {use --repository} $res]} {
  puts stderr "Cannot run this test within an open checkout"
  return
}
#
# Fossil will write data to $HOME when running 'fossil open' here.
# We don't want to clutter the $HOME of the test caller.
set env(HOME) [pwd]

# Construct a test repository
#
exec $::fossilexe sqlite3 --no-repository m5.fossil <$testdir/${testfile}_repo.sql
fossil rebuild m5.fossil
fossil open m5.fossil
fossil user default drh --user drh
fossil update baseline
checkout-test 10 {
  da5c8346496f3421cb58f84b6e59e9531d9d424d  one.txt
  ed24d19d726d173f18dbf4a9a0f8514daa3e3ca4  three.txt
fossil update br1
checkout-test 120 {
  35815cf5804e8933eab64ae34e00bbb381be72c5  four.txt
  da5c8346496f3421cb58f84b6e59e9531d9d424d  one.txt
  ed24d19d726d173f18dbf4a9a0f8514daa3e3ca4  three.txt
  278a402316510f6ae4a77186796a6bde78c7dbc1  two.txt
}
fossil merge br4
checkout-test 121 {
  35815cf5804e8933eab64ae34e00bbb381be72c5  four.txt
  ed24d19d726d173f18dbf4a9a0f8514daa3e3ca4  three.txt
  278a402316510f6ae4a77186796a6bde78c7dbc1  two.txt
}
fossil undo
fossil update br4
checkout-test 122 {
  6e167b139c294bed560e2e30b352361b101e1f39  four.txt
  ed24d19d726d173f18dbf4a9a0f8514daa3e3ca4  three.txt
  278a402316510f6ae4a77186796a6bde78c7dbc1  two.txt
}
fossil merge br1
checkout-test 123 {
  6e167b139c294bed560e2e30b352361b101e1f39  four.txt
  ed24d19d726d173f18dbf4a9a0f8514daa3e3ca4  three.txt
  278a402316510f6ae4a77186796a6bde78c7dbc1  two.txt
}
fossil undo







fossil update br1
checkout-test 120 {
  35815cf5804e8933eab64ae34e00bbb381be72c5  four.txt
  da5c8346496f3421cb58f84b6e59e9531d9d424d  one.txt
  ed24d19d726d173f18dbf4a9a0f8514daa3e3ca4  three.txt
  278a402316510f6ae4a77186796a6bde78c7dbc1  two.txt
}
fossil merge br4 -expectError
checkout-test 121 {
  35815cf5804e8933eab64ae34e00bbb381be72c5  four.txt
  ed24d19d726d173f18dbf4a9a0f8514daa3e3ca4  three.txt
  278a402316510f6ae4a77186796a6bde78c7dbc1  two.txt
}
fossil undo
fossil update br4
checkout-test 122 {
  6e167b139c294bed560e2e30b352361b101e1f39  four.txt
  ed24d19d726d173f18dbf4a9a0f8514daa3e3ca4  three.txt
  278a402316510f6ae4a77186796a6bde78c7dbc1  two.txt
}
fossil merge br1 -expectError
checkout-test 123 {
  6e167b139c294bed560e2e30b352361b101e1f39  four.txt
  ed24d19d726d173f18dbf4a9a0f8514daa3e3ca4  three.txt
  278a402316510f6ae4a77186796a6bde78c7dbc1  two.txt
}
fossil undo

Changes to test/merge6.test.
fossil merge branch_for_f3_f4
fossil commit -m "new trunk files f2, f3, and f4 via merge"
fossil ls

test merge_multi-4 {[normalize_result] eq {f1
f2
f3
f4}}






fossil merge branch_for_f3_f4
fossil commit -m "new trunk files f2, f3, and f4 via merge"
fossil ls

test merge_multi-4 {[normalize_result] eq {f1
f2
f3
f4}} knownBug
Changes to test/merge_renames.test.
#
# Tests for merging with renames
#
#

catch {exec $::fossilexe info} res
puts res=$res
if {![regexp {use --repository} $res]} {
  puts stderr "Cannot run this test within an open checkout"
  return
}

######################################
#  Test 1                            #





#
# Tests for merging with renames
#
#

catch {exec $::fossilexe info} res

if {![regexp {use --repository} $res]} {
  puts stderr "Cannot run this test within an open checkout"
  return
}

######################################
#  Test 1                            #
        set deletes [expr $deletes + 1]
    }
}

if {$deletes!=0} {
    # failed
    protOut "Error, the merge should not delete any file"
    test merge_renames-2 0
} else {
    test merge_renames-2 1
}

######################################
#  Test 4                            #
#  Reported: Ticket [67176c3aa4]     #
######################################







        set deletes [expr $deletes + 1]
    }
}

if {$deletes!=0} {
    # failed
    protOut "Error, the merge should not delete any file"
    test merge_renames-3 0
} else {
    test merge_renames-3 1
}

######################################
#  Test 4                            #
#  Reported: Ticket [67176c3aa4]     #
######################################

fossil merge trunk
fossil commit -m "trunk merged, should have 3 files"

fossil ls

test merge_renames-5 {[normalize_result] eq {f1
f2
f3}}

######################################
#
# Tests for troubles not specifically linked with renames but that I'd like to
# write:
#  [c26c63eb1b] - 'merge --backout' does not handle conflicts properly
#  [953031915f] - Lack of warning when overwriting extra files
#  [4df5f38f1e] - Troubles merging a file delete with a file change






fossil merge trunk
fossil commit -m "trunk merged, should have 3 files"

fossil ls

test merge_renames-5 {[normalize_result] eq {f1
f2
f3}} knownBug

######################################
#
# Tests for troubles not specifically linked with renames but that I'd like to
# write:
#  [c26c63eb1b] - 'merge --backout' does not handle conflicts properly
#  [953031915f] - Lack of warning when overwriting extra files
#  [4df5f38f1e] - Troubles merging a file delete with a file change
Changes to test/mv-rm.test.
#
############################################################################
#
# MV / RM Commands
#

catch {exec $::fossilexe info} res
puts res=$res
if {![regexp {use --repository} $res]} {
  puts stderr "Cannot run this test within an open checkout"
  return
}

########################################
# Setup: Add Files and Commit          #






#
############################################################################
#
# MV / RM Commands
#

catch {exec $::fossilexe info} res

if {![regexp {use --repository} $res]} {
  puts stderr "Cannot run this test within an open checkout"
  return
}

########################################
# Setup: Add Files and Commit          #
cd [file join $rootDir subdir1]

fossil mv ../f1 .
test mv-soft-relative-1 {$RESULT eq "RENAME f1 subdir1/f1"}

fossil revert
test mv-soft-relative-2 {
  [normalize_result] eq "DELETE: subdir1/f1\nREVERTED: f1${undoMsg}"
}

cd $rootDir

###################################
# Test 2: Soft Move Relative File #
###################################

file mkdir [file join $rootDir subdir2]
cd [file join $rootDir subdir2]

fossil mv ../f2 ./f2
test mv-soft-relative-3 {$RESULT eq "RENAME f2 subdir2/f2"}

fossil revert
test mv-soft-relative-4 {
  [normalize_result] eq "DELETE: subdir2/f2\nREVERTED: f2${undoMsg}"
}

cd $rootDir

########################################
# Test 3: Hard Move Relative Directory #
########################################

file mkdir [file join $rootDir subdir3]
cd [file join $rootDir subdir3]

fossil mv --hard ../f3 .
test mv-hard-relative-1 {
  [normalize_result] eq "RENAME f3 subdir3/f3\nMOVED_FILE ${rootDir}/f3"
}

fossil revert
test mv-hard-relative-2 {
  [normalize_result] eq "DELETE: subdir3/f3\nREVERTED: f3${undoMsg}"
}

cd $rootDir

###################################
# Test 4: Hard Move Relative File #
###################################

file mkdir [file join $rootDir subdir4]
cd [file join $rootDir subdir4]

fossil mv --hard ../f4 ./f4
test mv-hard-relative-3 {
  [normalize_result] eq "RENAME f4 subdir4/f4\nMOVED_FILE ${rootDir}/f4"
}

fossil revert
test mv-hard-relative-4 {
  [normalize_result] eq "DELETE: subdir4/f4\nREVERTED: f4${undoMsg}"
}

cd $rootDir

########################################
# Test 5: Soft Move Absolute Directory #
########################################

file mkdir [file join $rootDir subdir5]
cd [file join $rootDir subdir5]

fossil mv [file join $rootDir f5] [file join $rootDir subdir5]
test mv-soft-absolute-1 {$RESULT eq "RENAME f5 subdir5/f5"}

fossil revert
test mv-soft-absolute-2 {
  [normalize_result] eq "DELETE: subdir5/f5\nREVERTED: f5${undoMsg}"
}

cd $rootDir

###################################
# Test 6: Soft Move Absolute File #
###################################

file mkdir [file join $rootDir subdir6]
cd [file join $rootDir subdir6]

fossil mv [file join $rootDir f6] [file join $rootDir subdir6 f6]
test mv-soft-absolute-3 {$RESULT eq "RENAME f6 subdir6/f6"}

fossil revert
test mv-soft-absolute-4 {
  [normalize_result] eq "DELETE: subdir6/f6\nREVERTED: f6${undoMsg}"
}

cd $rootDir

########################################
# Test 7: Hard Move Absolute Directory #
########################################

file mkdir [file join $rootDir subdir7]
cd [file join $rootDir subdir7]

fossil mv --hard [file join $rootDir f7] [file join $rootDir subdir7]
test mv-hard-absolute-1 {
  [normalize_result] eq "RENAME f7 subdir7/f7\nMOVED_FILE ${rootDir}/f7"
}

fossil revert
test mv-hard-absolute-2 {
  [normalize_result] eq "DELETE: subdir7/f7\nREVERTED: f7${undoMsg}"
}

cd $rootDir

###################################
# Test 8: Hard Move Absolute File #
###################################

file mkdir [file join $rootDir subdir8]
cd [file join $rootDir subdir8]

fossil mv --hard [file join $rootDir f8] [file join $rootDir subdir8 f8]
test mv-hard-absolute-3 {
  [normalize_result] eq "RENAME f8 subdir8/f8\nMOVED_FILE ${rootDir}/f8"
}

fossil revert
test mv-hard-absolute-4 {
  [normalize_result] eq "DELETE: subdir8/f8\nREVERTED: f8${undoMsg}"
}

cd $rootDir

##########################################
# Test 9: Soft Remove Relative Directory #
##########################################






cd [file join $rootDir subdir1]

fossil mv ../f1 .
test mv-soft-relative-1 {$RESULT eq "RENAME f1 subdir1/f1"}

fossil revert
test mv-soft-relative-2 {
  [normalize_result] eq "DELETE   subdir1/f1\nREVERT   f1${undoMsg}"
}

cd $rootDir

###################################
# Test 2: Soft Move Relative File #
###################################

file mkdir [file join $rootDir subdir2]
cd [file join $rootDir subdir2]

fossil mv ../f2 ./f2
test mv-soft-relative-3 {$RESULT eq "RENAME f2 subdir2/f2"}

fossil revert
test mv-soft-relative-4 {
  [normalize_result] eq "DELETE   subdir2/f2\nREVERT   f2${undoMsg}"
}

cd $rootDir

########################################
# Test 3: Hard Move Relative Directory #
########################################

file mkdir [file join $rootDir subdir3]
cd [file join $rootDir subdir3]

fossil mv --hard ../f3 .
test mv-hard-relative-1 {
  [normalize_result] eq "RENAME f3 subdir3/f3\nMOVED_FILE ${rootDir}/f3"
}

fossil revert
test mv-hard-relative-2 {
  [normalize_result] eq "DELETE   subdir3/f3\nREVERT   f3${undoMsg}"
}

cd $rootDir

###################################
# Test 4: Hard Move Relative File #
###################################

file mkdir [file join $rootDir subdir4]
cd [file join $rootDir subdir4]

fossil mv --hard ../f4 ./f4
test mv-hard-relative-3 {
  [normalize_result] eq "RENAME f4 subdir4/f4\nMOVED_FILE ${rootDir}/f4"
}

fossil revert
test mv-hard-relative-4 {
  [normalize_result] eq "DELETE   subdir4/f4\nREVERT   f4${undoMsg}"
}

cd $rootDir

########################################
# Test 5: Soft Move Absolute Directory #
########################################

file mkdir [file join $rootDir subdir5]
cd [file join $rootDir subdir5]

fossil mv [file join $rootDir f5] [file join $rootDir subdir5]
test mv-soft-absolute-1 {$RESULT eq "RENAME f5 subdir5/f5"}

fossil revert
test mv-soft-absolute-2 {
  [normalize_result] eq "DELETE   subdir5/f5\nREVERT   f5${undoMsg}"
}

cd $rootDir

###################################
# Test 6: Soft Move Absolute File #
###################################

file mkdir [file join $rootDir subdir6]
cd [file join $rootDir subdir6]

fossil mv [file join $rootDir f6] [file join $rootDir subdir6 f6]
test mv-soft-absolute-3 {$RESULT eq "RENAME f6 subdir6/f6"}

fossil revert
test mv-soft-absolute-4 {
  [normalize_result] eq "DELETE   subdir6/f6\nREVERT   f6${undoMsg}"
}

cd $rootDir

########################################
# Test 7: Hard Move Absolute Directory #
########################################

file mkdir [file join $rootDir subdir7]
cd [file join $rootDir subdir7]

fossil mv --hard [file join $rootDir f7] [file join $rootDir subdir7]
test mv-hard-absolute-1 {
  [normalize_result] eq "RENAME f7 subdir7/f7\nMOVED_FILE ${rootDir}/f7"
}

fossil revert
test mv-hard-absolute-2 {
  [normalize_result] eq "DELETE   subdir7/f7\nREVERT   f7${undoMsg}"
}

cd $rootDir

###################################
# Test 8: Hard Move Absolute File #
###################################

file mkdir [file join $rootDir subdir8]
cd [file join $rootDir subdir8]

fossil mv --hard [file join $rootDir f8] [file join $rootDir subdir8 f8]
test mv-hard-absolute-3 {
  [normalize_result] eq "RENAME f8 subdir8/f8\nMOVED_FILE ${rootDir}/f8"
}

fossil revert
test mv-hard-absolute-4 {
  [normalize_result] eq "DELETE   subdir8/f8\nREVERT   f8${undoMsg}"
}

cd $rootDir

##########################################
# Test 9: Soft Remove Relative Directory #
##########################################
test rm-soft-relative-3 {
  [normalize_result] eq "DELETED subdirC/f10\nDELETED subdirC/f11"
}

fossil revert
test rm-soft-relative-4 {
  [normalize_result] eq \
  "REVERTED: subdirB/f9\nREVERTED: subdirC/f10\nREVERTED: subdirC/f11${undoMsg}"
}

cd $rootDir

######################################
# Test 10: Soft Remove Relative File #
######################################

file mkdir [file join $rootDir subdir2]
cd [file join $rootDir subdir2]

fossil rm ../f2
test rm-soft-relative-5 {$RESULT eq "DELETED f2"}

fossil revert
test rm-soft-relative-6 {
  [normalize_result] eq "REVERTED: f2${undoMsg}"
}

cd $rootDir

###########################################
# Test 11: Hard Remove Relative Directory #
###########################################






test rm-soft-relative-3 {
  [normalize_result] eq "DELETED subdirC/f10\nDELETED subdirC/f11"
}

fossil revert
test rm-soft-relative-4 {
  [normalize_result] eq \
  "REVERT   subdirB/f9\nREVERT   subdirC/f10\nREVERT   subdirC/f11${undoMsg}"
}

cd $rootDir

######################################
# Test 10: Soft Remove Relative File #
######################################

file mkdir [file join $rootDir subdir2]
cd [file join $rootDir subdir2]

fossil rm ../f2
test rm-soft-relative-5 {$RESULT eq "DELETED f2"}

fossil revert
test rm-soft-relative-6 {
  [normalize_result] eq "REVERT   f2${undoMsg}"
}

cd $rootDir

###########################################
# Test 11: Hard Remove Relative Directory #
###########################################
  "DELETED subdirC/f10\nDELETED subdirC/f11\nDELETED_FILE\
${rootDir}/subdirC/f10\nDELETED_FILE ${rootDir}/subdirC/f11"
}

fossil revert
test rm-hard-relative-4 {
  [normalize_result] eq \
  "REVERTED: subdirB/f9\nREVERTED: subdirC/f10\nREVERTED: subdirC/f11${undoMsg}"
}

cd $rootDir

######################################
# Test 12: Hard Remove Relative File #
######################################

file mkdir [file join $rootDir subdir4]
cd [file join $rootDir subdir4]

fossil rm --hard ../f4
test rm-hard-relative-5 {
  [normalize_result] eq "DELETED f4\nDELETED_FILE ${rootDir}/f4"
}

fossil revert
test rm-hard-relative-6 {
  [normalize_result] eq "REVERTED: f4${undoMsg}"
}

cd $rootDir

###########################################
# Test 13: Soft Remove Absolute Directory #
###########################################






  "DELETED subdirC/f10\nDELETED subdirC/f11\nDELETED_FILE\
${rootDir}/subdirC/f10\nDELETED_FILE ${rootDir}/subdirC/f11"
}

fossil revert
test rm-hard-relative-4 {
  [normalize_result] eq \
  "REVERT   subdirB/f9\nREVERT   subdirC/f10\nREVERT   subdirC/f11${undoMsg}"
}

cd $rootDir

######################################
# Test 12: Hard Remove Relative File #
######################################

file mkdir [file join $rootDir subdir4]
cd [file join $rootDir subdir4]

fossil rm --hard ../f4
test rm-hard-relative-5 {
  [normalize_result] eq "DELETED f4\nDELETED_FILE ${rootDir}/f4"
}

fossil revert
test rm-hard-relative-6 {
  [normalize_result] eq "REVERT   f4${undoMsg}"
}

cd $rootDir

###########################################
# Test 13: Soft Remove Absolute Directory #
###########################################
test rm-soft-absolute-3 {
  [normalize_result] eq "DELETED subdirC/f10\nDELETED subdirC/f11"
}

fossil revert
test rm-soft-absolute-4 {
  [normalize_result] eq \
  "REVERTED: subdirB/f9\nREVERTED: subdirC/f10\nREVERTED: subdirC/f11${undoMsg}"
}

cd $rootDir

######################################
# Test 14: Soft Remove Absolute File #
######################################

file mkdir [file join $rootDir subdir6]
cd [file join $rootDir subdir6]

fossil rm [file join $rootDir f6]
test rm-soft-absolute-5 {$RESULT eq "DELETED f6"}

fossil revert
test rm-soft-absolute-6 {
  [normalize_result] eq "REVERTED: f6${undoMsg}"
}

cd $rootDir

###########################################
# Test 15: Hard Remove Absolute Directory #
###########################################






test rm-soft-absolute-3 {
  [normalize_result] eq "DELETED subdirC/f10\nDELETED subdirC/f11"
}

fossil revert
test rm-soft-absolute-4 {
  [normalize_result] eq \
  "REVERT   subdirB/f9\nREVERT   subdirC/f10\nREVERT   subdirC/f11${undoMsg}"
}

cd $rootDir

######################################
# Test 14: Soft Remove Absolute File #
######################################

file mkdir [file join $rootDir subdir6]
cd [file join $rootDir subdir6]

fossil rm [file join $rootDir f6]
test rm-soft-absolute-5 {$RESULT eq "DELETED f6"}

fossil revert
test rm-soft-absolute-6 {
  [normalize_result] eq "REVERT   f6${undoMsg}"
}

cd $rootDir

###########################################
# Test 15: Hard Remove Absolute Directory #
###########################################
  "DELETED subdirC/f10\nDELETED subdirC/f11\nDELETED_FILE\
${rootDir}/subdirC/f10\nDELETED_FILE ${rootDir}/subdirC/f11"
}

fossil revert
test rm-hard-absolute-4 {
  [normalize_result] eq \
  "REVERTED: subdirB/f9\nREVERTED: subdirC/f10\nREVERTED: subdirC/f11${undoMsg}"
}

cd $rootDir

######################################
# Test 16: Hard Remove Absolute File #
######################################

file mkdir [file join $rootDir subdir8]
cd [file join $rootDir subdir8]

fossil rm --hard [file join $rootDir f8]
test rm-hard-absolute-5 {
  [normalize_result] eq "DELETED f8\nDELETED_FILE ${rootDir}/f8"
}

fossil revert
test rm-hard-absolute-6 {
  [normalize_result] eq "REVERTED: f8${undoMsg}"
}

cd $rootDir

#######################################
# Test 17: Move File to New Directory #
#######################################






  "DELETED subdirC/f10\nDELETED subdirC/f11\nDELETED_FILE\
${rootDir}/subdirC/f10\nDELETED_FILE ${rootDir}/subdirC/f11"
}

fossil revert
test rm-hard-absolute-4 {
  [normalize_result] eq \
  "REVERT   subdirB/f9\nREVERT   subdirC/f10\nREVERT   subdirC/f11${undoMsg}"
}

cd $rootDir

######################################
# Test 16: Hard Remove Absolute File #
######################################

file mkdir [file join $rootDir subdir8]
cd [file join $rootDir subdir8]

fossil rm --hard [file join $rootDir f8]
test rm-hard-absolute-5 {
  [normalize_result] eq "DELETED f8\nDELETED_FILE ${rootDir}/f8"
}

fossil revert
test rm-hard-absolute-6 {
  [normalize_result] eq "REVERT   f8${undoMsg}"
}

cd $rootDir

#######################################
# Test 17: Move File to New Directory #
#######################################
Changes to test/revert.test.
#
# Tests for 'fossil revert'
# 
#

# Test 'fossil revert' against expected results from 'fossil changes' and
# 'fossil addremove -n', as well as by verifying the existence of files
#
# Copyright (c) 2013 D. Richard Hipp
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Simplified BSD License (also
# known as the "2-Clause License" or "FreeBSD License".)
#
# This program is distributed in the hope that it will be useful,
# but without any warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose.
#
# Author contact information:
#   drh@hwaci.com
#   http://www.hwaci.com/drh/
#
############################################################################
#
#
# Tests for 'fossil revert'
# 
#

# Test 'fossil revert' against expected results from 'fossil changes' and
# 'fossil addremove -n', as well as by verifying the existence of files
    test revert-$testid$key $passed
  }
  
  fossil undo
}

catch {exec $::fossilexe info} res
puts res=$res
if {![regexp {use --repository} $res]} {
  puts stderr "Cannot run this test within an open checkout"
  return
}

repo_init







    test revert-$testid$key $passed
  }
  
  fossil undo
}

catch {exec $::fossilexe info} res

if {![regexp {use --repository} $res]} {
  puts stderr "Cannot run this test within an open checkout"
  return
}

repo_init

# Rename f3 to f3n
file rename -force f3 f3n
fossil mv f3 f3n

# Test 'fossil revert' with no arguments
#
revert-test 1-1 {} {
  UNMANAGE: f0
  REVERTED: f1
  REVERTED: f2
  REVERTED: f3
  DELETE: f3n
} -addremove {
  ADDED f0
} -exists {f0 f1 f2 f3} -notexists f3n

# Test with a single filename argument
#
revert-test 1-2 f0 {
  UNMANAGE: f0
} -changes {
  DELETED f1
  EDITED f2
  RENAMED f3n
} -addremove {
  ADDED f0
} -exists {f0 f2 f3n} -notexists f3

revert-test 1-3 f1 {
  REVERTED: f1
} -changes {
  ADDED f0
  EDITED f2
  RENAMED f3n
} -exists {f0 f1 f2 f3n} -notexists f3

revert-test 1-4 f2 {
  REVERTED: f2
} -changes {
  ADDED f0
  DELETED f1
  RENAMED f3n
} -exists {f0 f2 f3n} -notexists {f1 f3}

# Both files involved in a rename are reverted regardless of which filename
# is used as an argument to 'fossil revert'
#
revert-test 1-5 f3 {
  REVERTED: f3
  DELETE: f3n
} -changes {
  ADDED f0
  DELETED f1
  EDITED f2
} -exists {f0 f2 f3} -notexists {f1 f3n}

revert-test 1-6 f3n {
  REVERTED: f3
  DELETE: f3n
} -changes {
  ADDED f0
  DELETED f1
  EDITED f2
} -exists {f0 f2 f3} -notexists {f1 f3n}

# Test with multiple filename arguments
#
revert-test 1-7 {f0 f2 f3n} {
  UNMANAGE: f0
  REVERTED: f2
  REVERTED: f3
  DELETE: f3n
} -changes {
  DELETED f1
} -addremove {
  ADDED f0
} -exists {f0 f2 f3} -notexists {f1 f3n}








# Rename f3 to f3n
file rename -force f3 f3n
fossil mv f3 f3n

# Test 'fossil revert' with no arguments
#
revert-test 1-1 {} {
  UNMANAGE f0
  REVERT   f1
  REVERT   f2
  REVERT   f3
  DELETE   f3n
} -addremove {
  ADDED f0
} -exists {f0 f1 f2 f3} -notexists f3n

# Test with a single filename argument
#
revert-test 1-2 f0 {
  UNMANAGE f0
} -changes {
  DELETED f1
  EDITED f2
  RENAMED f3n
} -addremove {
  ADDED f0
} -exists {f0 f2 f3n} -notexists f3

revert-test 1-3 f1 {
  REVERT   f1
} -changes {
  ADDED f0
  EDITED f2
  RENAMED f3n
} -exists {f0 f1 f2 f3n} -notexists f3

revert-test 1-4 f2 {
  REVERT   f2
} -changes {
  ADDED f0
  DELETED f1
  RENAMED f3n
} -exists {f0 f2 f3n} -notexists {f1 f3}

# Both files involved in a rename are reverted regardless of which filename
# is used as an argument to 'fossil revert'
#
revert-test 1-5 f3 {
  REVERT   f3
  DELETE   f3n
} -changes {
  ADDED f0
  DELETED f1
  EDITED f2
} -exists {f0 f2 f3} -notexists {f1 f3n}

revert-test 1-6 f3n {
  REVERT   f3
  DELETE   f3n
} -changes {
  ADDED f0
  DELETED f1
  EDITED f2
} -exists {f0 f2 f3} -notexists {f1 f3n}

# Test with multiple filename arguments
#
revert-test 1-7 {f0 f2 f3n} {
  UNMANAGE f0
  REVERT   f2
  REVERT   f3
  DELETE   f3n
} -changes {
  DELETED f1
} -addremove {
  ADDED f0
} -exists {f0 f2 f3} -notexists {f1 f3n}


write_file f1n "f1n"
fossil mv f1 f1n
write_file f1 "f1b"
fossil add f1

revert-test 2-1 {} {
  REVERTED: f1
  DELETE: f1n
} -exists {f1} -notexists {f1n}
write_file f1n "f1n"
fossil mv f1 f1n
write_file f1 "f1b"
fossil add f1

revert-test 2-1 {} {
  REVERT   f1
  DELETE   f1n
} -exists {f1} -notexists {f1n}


# Test reverting a rename in the repo but not completed in the file
# system
repo_init
write_file f1 "f1"
fossil add f1
fossil commit -m "add f1"
fossil mv --soft f1 f1new
test 3-mv-1 {[file exists f1]}
test 3-mv-2 {![file exists f1new]}
revert-test 3-1 {} {
  REVERT f1
  DELETE f1new
} -exists {f1} -notexists {f1n}
Added test/stash.test.
#
# Copyright (c) 2013 D. Richard Hipp
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Simplified BSD License (also
# known as the "2-Clause License" or "FreeBSD License".)
#
# This program is distributed in the hope that it will be useful,
# but without any warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose.
#
# Author contact information:
#   drh@hwaci.com
#   http://www.hwaci.com/drh/
#
############################################################################
#
#
# Tests for 'fossil stash'
# 
#

proc knownBug {t tests} {
  return [expr {$t in $tests ? "knownBug" : ""}]
}

# Test 'fossil stash' against expected results from 'fossil changes' and
# 'fossil addremove -n', as well as by verifying the existence of files
# on the file system. Unlike the similar function found in
# revert.test, 'fossil undo' is not called after each test because
# many stash operations aren't undoable, and because further testing
# of the stash content is more likely to be useful.
#
# The extra list "-knownbugs" is a list of areas that should be
# marked as "knownBug" to the inner call to test. Known areas are:
#  -code       The exit status of fossil stash
#  -result     The result string didn't match
#  -changes    The changed file set didn't match
#  -addremove  The addremove result set didn't match
#  -exists     One or more listed files don't exist
#  -notexists  One or more listed files do exist
#
# Also, if the exit status of fossil stash does not match
# expectations, the rest of the areas are not tested.
proc test_result_state {testid cmdArgs expectedOutput args} {
  global RESULT
  set passed 1
  
  set args [dict merge {
    -changes {} -addremove {} -exists {} -notexists {} -knownbugs {}
  } $args]

  set knownbugs [dict get $args "-knownbugs"]
  set result $::RESULT
  set code $::CODE
  if {[lindex $cmdArgs end] eq "-expectError"} {
    test $testid-CODE {$code}  [knownBug "-code" $knownbugs]
    if {!$code} {
      return
    }
  } else {
    test $testid-CODE {!$code}  [knownBug "-code" $knownbugs]
    if {$code} {
      return
    }
  }
  test_status_list $testid $result $expectedOutput [knownBug "-result" $knownbugs]
  
  set statusListTests [list -changes changes -addremove {addremove -n}]
  foreach {key fossilArgs} $statusListTests {
    set expected [dict get $args $key]
    set result [fossil {*}$fossilArgs] 
    test_status_list $testid$key $result $expected [knownBug $key $knownbugs]
  }
  
  set fileExistsTests [list -exists 1 does -notexists 0 should]
  foreach {key expected verb} $fileExistsTests {
    foreach path [dict get $args $key] {
      if {[file exists $path] != $expected} {
        set passed 0
        protOut "  Failure: File $verb not exist: $path"
      }
    }
    test $testid$key $passed [knownBug $key $knownbugs]
  }
  
  #fossil undo
}

proc stash-test {testid stashArgs expectedStashOutput args} {
  fossil stash {*}$stashArgs
  return [test_result_state stash-$testid "stash $stashArgs" $expectedStashOutput {*}$args] 
}

catch {exec $::fossilexe info} res
if {![regexp {use --repository} $res]} {
  puts stderr "Cannot run this test within an open checkout"
  return
}

repo_init

# Prepare first commit
#
write_file f1 "f1"
write_file f2 "f2"
write_file f3 "f3"
fossil add f1 f2 f3
fossil commit -m "c1" --tag c1

########
# fossil stash
# fossil stash save ?-m|--comment COMMENT? ?FILES...?

# Make simple changes to stash
# Add f0, remove f1, edit f2, rename f3 to f3n
write_file f0 "f0"
fossil add f0
file delete f1
fossil rm f1
write_file f2 "f2.1"
file rename -force f3 f3n
fossil mv f3 f3n

# Stash these changes and confirm
stash-test 1 {save -m "stash 1"} {
  UNMANAGE f0
  REVERT   f1
  REVERT   f2
  REVERT   f3
  DELETE   f3n
} -addremove {
  ADDED f0
} -exists {f0 f1 f2 f3} -notexists {f3n}

########
# fossil stash list|ls  ?-v|--verbose? ?-W|--width <num>?

# Confirm there is a stash saved
fossil stash list
#protOut "{[normalize_result]}"
#{1: [21bc64cff8c702] on 2016-02-10 19:48:44
#       stash 1}
test stash-1-list-1 {[regexp {^1: \[[0-9a-z]+\] on } [first_data_line]]}
test stash-1-list-2 {[regexp {^\s+stash 1\s*$} [second_data_line]]}

set diff_stash_1 {DELETE f1
Index: f1
==================================================================
--- f1
+++ f1
@@ -1,1 +0,0 @@
-f1

CHANGED f2
--- f2
+++ f2
@@ -1,1 +1,1 @@
-f2
+f2.1

CHANGED f3n
--- f3n
+++ f3n

ADDED f0
Index: f0
==================================================================
--- f0
+++ f0
@@ -0,0 +1,1 @@
+f0}

########
# fossil stash show|cat ?STASHID? ?DIFF-OPTIONS?
# fossil stash [g]diff ?STASHID? ?DIFF-OPTIONS?

fossil stash show
test stash-1-show {[normalize_result] eq $diff_stash_1}
fossil stash diff
test stash-1-diff {[normalize_result] eq $diff_stash_1}

########
# fossil stash pop

stash-test 2 pop {
  DELETE f1
  UPDATE f2
  UPDATE f3n
  ADDED  f0
} -changes {
  ADDED      f0
  MISSING    f1
  EDITED     f2
  MISSING    f3
} -addremove {
  ADDED  f3n
  DELETED  f1
  DELETED  f3
} -exists {f0 f2 f3n} -notexists {f1 f3}

# Confirm there is no longer a stash saved
fossil stash list
test stash-2-list {[first_data_line] eq "empty stash"}


# Test stashed mv without touching the file system
# Issue reported by email to fossil-users
#   from Warren Young, dated Tue, 9 Feb 2016 01:22:54 -0700
#   with checkin [b8c7af5bd9] plus a local patch on CentOS 5
#   64 bit intel, 8-byte pointer, 4-byte integer
# Stashed renamed file said:
# fossil: ./src/delta.c:231: checksum: Assertion '...' failed.
# Should be triggered by this stash-WY-1 test.
fossil checkout --force c1
fossil clean
fossil mv --soft f1 f1new
stash-test WY-1 {save -m "Reported 2016-02-09"} {
  REVERT   f1
  DELETE   f1new
} -changes {
} -addremove {
} -exists {f1 f2 f3} -notexists {f1new} -knownbugs {-code -result}
# TODO: add tests that verify the saved stash is sensible. Possibly
# by applying it and checking results. But until the SQLITE_CONSTRAINT
# error is fixed, there is nothing stashed to test.



# Test stashing the combination of a renamed file and an added file that
# uses the renamed file's original filename. I expect to see the same
# behavior as fossil revert: calmly back out both the rename and the
# add, and presumably stash the content of the added file before it
# is replaced by the revert.
#
repo_init
write_file f1 "f1"
fossil add f1
fossil commit -m "add f1"

write_file f1n "f1n"
fossil mv f1 f1n
write_file f1 "f1b"
fossil add f1

stash-test 2-1 {save -m "f1b"} {
  REVERT   f1
  DELETE   f1n
} -exists {f1} -notexists {f1n} -knownbugs {-code -result}
# TODO: add tests that verify the saved stash is sensible. Possibly
# by applying it and checking results. But until the MISSING file
# error is fixed, there is nothing stashed to test.
 

# Test stashing a newly added (but never committed) file. As with
# fossil revert, fossil stash save unmanages the new file, but 
# leaves the copy present on disk. This is undocumented, but 
# probably sensible.
repo_init
write_file f1 "f1"
write_file f2 "f2"
fossil add f1 f2
fossil commit -m "baseline"

write_file f3 "f3"
fossil add f3
stash-test 3-1 {save -m f3} {
  UNMANAGE f3
} -addremove {
  ADDED f3
} -exists {f1 f2 f3} -notexists {}
#fossil status
fossil stash show
test stash-3-1-show {[normalize_result] eq {ADDED f3
Index: f3
==================================================================
--- f3
+++ f3
@@ -0,0 +1,1 @@
+f3}}
stash-test 3-1-pop {pop} {
  ADDED f3
} -changes {
  ADDED f3
} -addremove {
} -exists {f1 f2 f3} -notexists {}
fossil status


# Test stashing a rename of one file with at least one file
# unchanged. This should stash (and revert) just the rename
# operation. Instead it also stores and touches the unchanged file. 
repo_init
write_file f1 "f1"
write_file f2 "f2"
fossil add f1 f2
fossil commit -m "baseline"

fossil mv --hard f2 f2n
test_result_state stash-3-4-mv "mv --hard f2 f2n" {
  RENAME f2 f2n
  MOVED_FILE f2
} -changes {
  RENAMED f2n
} -addremove {
} -exists {f1 f2n} -notexists {f2}

stash-test 3-2 {save -m f2n} {
  REVERT f2
  DELETE f2n
} -exists {f1 f2} -notexists {f2n} -knownbugs {-result}
fossil stash show
test stash-3-2-show-1 {![regexp {\sf1} $RESULT]} knownBug
test stash-3-2-show-2 {[regexp {\sf2n} $RESULT]}
stash-test 3-2-pop {pop} {
  UPDATE f1
  UPDATE f2n
} -changes {
  RENAMED    f2n
} -addremove {
  ADDED  f2n
  DELETED  f2
} -exists {f1 f2n} -notexists {f2} -knownbugs {-changes}



########
# fossil stash snapshot ?-m|--comment COMMENT? ?FILES...?

repo_init
write_file f1 "f1"
write_file f2 "f2"
write_file f3 "f3"
fossil add f1 f2 f3
fossil commit -m "c1" --tag c1

# Make simple changes and snapshot them
# Add f0, edit f2
write_file f0 "f0"
fossil add f0
write_file f2 "f2.1"

# Snapshot these changes and confirm
stash-test 4-1 {snapshot -m "snap 1"} {
} -changes {
  ADDED      f0
  EDITED     f2
} -addremove {
} -exists {f0 f1 f2 f3} -notexists {}
fossil stash diff
test stash-4-1-diff-CODE {!$::CODE}
fossil stash show
test stash-4-1-show-1 {[regexp {CHANGED f2} $RESULT]}
test stash-4-1-show-2 {[regexp {ADDED f0} $RESULT]}

# remove f1 and snapshot
file delete f1
fossil rm f1
stash-test 4-2 {snapshot -m "snap 2"} {
} -changes {
  ADDED      f0
  DELETED    f1
  EDITED     f2
} -addremove {
} -exists {f0 f2 f3} -notexists {f1}
fossil stash diff
test stash-4-2-diff-CODE {!$::CODE} ;# knownBug
fossil stash show
test stash-4-2-show-1 {[regexp {DELETE f1} $RESULT]}
test stash-4-2-show-2 {[regexp {CHANGED f2} $RESULT]}
test stash-4-2-show-3 {[regexp {ADDED f0} $RESULT]}


# rename f3 to f3n and snapshot
file rename -force f3 f3n
fossil mv f3 f3n
stash-test 4-3 {snapshot -m "snap 3"} {
} -changes {
  ADDED      f0
  DELETED    f1
  EDITED     f2
  RENAMED    f3n
} -addremove {
} -exists {f0 f2 f3n} -notexists {f1 f3}
fossil stash diff
test stash-4-3-diff-CODE {!$::CODE} ;# knownBug
fossil stash show
test stash-4-3-show-1 {[regexp {DELETE f1} $RESULT]}
test stash-4-3-show-2 {[regexp {CHANGED f2} $RESULT]}
test stash-4-3-show-2 {[regexp {CHANGED f3n} $RESULT]}
test stash-4-3-show-3 {[regexp {ADDED f0} $RESULT]}

# fossil stash apply ?STASHID?
# fossil stash goto ?STASHID?
# fossil stash rm|drop ?STASHID? ?-a|--all?

#fossil checkout --force c1
#fossil clean
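The stash tests above make heavy use of the knownBug constraint that this check-in adds to the test proc in test/tester.tcl (next section): a failing test tagged knownBug is reported but collected in ignored_test rather than bad_test unless the runner is invoked with -strict. A minimal, hypothetical example of the tagging convention:

  # Deliberately failing expression recorded as a known bug; it does not
  # fail the overall run unless -strict is passed to tester.tcl.
  test stash-knownBug-sketch {1 == 2} knownBug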
Changes to test/tester.tcl.
#
#     tclsh ../test/tester.tcl ../bld/fossil
#
# Where ../test/tester.tcl is the name of this file and ../bld/fossil
# is the name of the executable to be tested.
#


set testrundir [pwd]
set testdir [file normalize [file dir $argv0]]
set fossilexe [file normalize [lindex $argv 0]]






set argv [lrange $argv 1 end]

set i [lsearch $argv -halt]
if {$i>=0} {
  set HALT 1
  set argv [lreplace $argv $i $i]
} else {






#
#     tclsh ../test/tester.tcl ../bld/fossil
#
# Where ../test/tester.tcl is the name of this file and ../bld/fossil
# is the name of the executable to be tested.
#

set testfiledir [file normalize [file dirname [info script]]]
set testrundir [pwd]
set testdir [file normalize [file dirname $argv0]]
set fossilexe [file normalize [lindex $argv 0]]

if {$tcl_platform(platform) eq "windows" && \
    [string length [file extension $fossilexe]] == 0} {
  append fossilexe .exe
}

set argv [lrange $argv 1 end]

set i [lsearch $argv -halt]
if {$i>=0} {
  set HALT 1
  set argv [lreplace $argv $i $i]
} else {
set i [lsearch $argv -verbose]
if {$i>=0} {
  set VERBOSE 1
  set argv [lreplace $argv $i $i]
} else {
  set VERBOSE 0
}

















if {[llength $argv]==0} {
  foreach f [lsort [glob $testdir/*.test]] {
    set base [file root [file tail $f]]
    lappend argv $base
  }
}

set tempPath [expr {[info exists env(TEMP)] ? \
    $env(TEMP) : [file dirname [info script]]}]

if {$tcl_platform(platform) eq "windows"} then {
  set tempPath [string map [list \\ /] $tempPath]
}

# start protocol
#
proc protInit {cmd} {
  if {$::PROT} {
    set out [open [file join $::testrundir prot] w]
    fconfigure $out -translation platform
    puts $out "starting tests with: $cmd"
    close $out
  }
}

# write protocol
#
proc protOut {msg} {

  puts stdout $msg

  if {$::PROT} {
    set out [open [file join $::testrundir prot] a]
    fconfigure $out -translation platform
    puts $out $msg
    close $out
  }
}






set i [lsearch $argv -verbose]
if {$i>=0} {
  set VERBOSE 1
  set argv [lreplace $argv $i $i]
} else {
  set VERBOSE 0
}

set i [lsearch $argv -quiet]
if {$i>=0} {
  set QUIET 1
  set argv [lreplace $argv $i $i]
} else {
  set QUIET 0
}

set i [lsearch $argv -strict]
if {$i>=0} {
  set STRICT 1
  set argv [lreplace $argv $i $i]
} else {
  set STRICT 0
}

if {[llength $argv]==0} {
  foreach f [lsort [glob $testdir/*.test]] {
    set base [file root [file tail $f]]
    lappend argv $base
  }
}








# start protocol
#
proc protInit {cmd} {
  if {$::PROT} {
    set out [open [file join $::testrundir prot] w]
    fconfigure $out -translation platform
    puts $out "starting tests with: $cmd"
    close $out
  }
}

# write protocol
#
proc protOut {msg {noQuiet 0}} {
  if {$noQuiet || !$::QUIET} {
    puts stdout $msg
  }
  if {$::PROT} {
    set out [open [file join $::testrundir prot] a]
    fconfigure $out -translation platform
    puts $out $msg
    close $out
  }
}
# diagnostics should be emitted when no error is seen.
# Sets the CODE and RESULT global variables for use in
# test expressions.
#
proc fossil_maybe_answer {answer args} {
  global fossilexe
  set cmd $fossilexe





  foreach a $args {
    lappend cmd $a
  }
  protOut $cmd

  flush stdout
  if {[string length $answer] > 0} {

    set prompt_file [file join $::tempPath fossil_prompt_answer]
    write_file $prompt_file $answer\n
    set rc [catch {eval exec $cmd <$prompt_file} result]
    file delete $prompt_file
  } else {
    set rc [catch {eval exec $cmd} result]
  }
  global RESULT CODE
  set CODE $rc
  if {$rc} {
    protOut "ERROR: $result"
  } elseif {$::VERBOSE} {
    protOut "RESULT: $result"
  }
  set RESULT $result
}

# Read a file into memory.






# diagnostics should be emitted when no error is seen.
# Sets the CODE and RESULT global variables for use in
# test expressions.
#
proc fossil_maybe_answer {answer args} {
  global fossilexe
  set cmd $fossilexe
  set expectError 0
  if {[lindex $args end] eq "-expectError"} {
    set expectError 1
    set args [lrange $args 0 end-1]
  }
  foreach a $args {
    lappend cmd $a
  }
  protOut $cmd

  flush stdout
  if {[string length $answer] > 0} {
    protOut $answer
    set prompt_file [file join $::tempPath fossil_prompt_answer]
    write_file $prompt_file $answer\n
    set rc [catch {eval exec $cmd <$prompt_file} result]
    file delete $prompt_file
  } else {
    set rc [catch {eval exec $cmd} result]
  }
  global RESULT CODE
  set CODE $rc
  if {($rc && !$expectError) || (!$rc && $expectError)} {
    protOut "ERROR: $result" 1
  } elseif {$::VERBOSE} {
    protOut "RESULT: $result"
  }
  set RESULT $result
}

# Read a file into memory.
#
proc repo_init {{filename ".rep.fossil"}} {
  if {$::env(HOME) ne [pwd]} {
    catch {exec $::fossilexe info} res
    if {![regexp {use --repository} $res]} {
      error "In an open checkout: cannot initialize a new repository here."
    }
    # Fossil will write data on $HOME, running 'fossil new' here.
    # We need not to clutter the $HOME of the test caller.
    #

    set ::env(HOME) [pwd]
  }
  catch {exec $::fossilexe close -f}
  file delete $filename
  exec $::fossilexe new $filename
  exec $::fossilexe open $filename
  exec $::fossilexe clean -f
  exec $::fossilexe set mtime-changes off
}
# Normalize file status lists (like those returned by 'fossil changes')
# so they can be compared using simple string comparison
#
proc normalize_status_list {list} {
  set normalized [list]
  set matches [regexp -all -inline -line {^\s*([A-Z_]+:?)\x20+(\S.*)$} $list]
  foreach {_ status file} $matches {
    lappend normalized [list $status [string trim $file]]
  }
  set normalized [lsort -index 1 $normalized]
  return $normalized
}

# Perform a test comparing two status lists
#
proc test_status_list {name result expected} {
  set expected [normalize_status_list $expected]
  set result [normalize_status_list $result]
  if {$result eq $expected} {
    test $name 1
  } else {
    protOut "  Expected:\n    [join $expected "\n    "]"
    protOut "  Got:\n    [join $result "\n    "]"
    test $name 0
  }
}

# Append all arguments into a single value and then returns it.
#
proc appendArgs {args} {
  eval append result $args
}

# Return the name of the versioned settings file containing the TH1
# setup script.
#
proc getTh1SetupFileName {} {
  #
  # NOTE: This uses the "testdir" global variable provided by the
  #       test suite; alternatively, the root of the source tree
  #       could be obtained directly from Fossil.
  #
  return [file normalize [file join [file dirname $::testdir] \
      .fossil-settings th1-setup]]
}

# Return the saved name of the versioned settings file containing
# the TH1 setup script.
#
proc getSavedTh1SetupFileName {} {
  return [appendArgs [getTh1SetupFileName] . [pid]]
}

# Sets the TH1 setup script to the one provided.  Prior to calling
# this, the [saveTh1SetupFile] procedure should be called in order to
# preserve the existing TH1 setup script.  Prior to completing the test,
# the [restoreTh1SetupFile] procedure should be called to restore the
# original TH1 setup script.
#
proc writeTh1SetupFile { data } {


  return [write_file [getTh1SetupFileName] $data]
}

# Saves the TH1 setup script file by renaming it, based on the current
# process ID.
#
proc saveTh1SetupFile {} {
  set oldFileName [getTh1SetupFileName]
  if {[file exists $oldFileName]} then {
    set newFileName [getSavedTh1SetupFileName]
    catch {file delete $newFileName}
    file rename $oldFileName $newFileName
  }
}

# Restores the original TH1 setup script file by renaming it back, based
# on the current process ID.
#
proc restoreTh1SetupFile {} {
  set oldFileName [getSavedTh1SetupFileName]
  set newFileName [getTh1SetupFileName]
  if {[file exists $oldFileName]} then {
    catch {file delete $newFileName}
    file rename $oldFileName $newFileName
  } else {
    #
    # NOTE: There was no TH1 setup script file, delete the test one.
    #
    file delete $newFileName
  }
}

# Perform a test
#
set test_count 0
proc test {name expr} {
  global bad_test test_count
  incr test_count

  set r [uplevel 1 [list expr $expr]]
  if {$r} {



    protOut "test $name OK"

  } else {




    protOut "test $name FAILED!"

    lappend bad_test $name
    if {$::HALT} exit

  }
}
set bad_test {}


# Return a random string N characters long.
#
set vocabulary 01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
append vocabulary "       ()*^!.eeeeeeeeaaaaattiioo   "
set nvocabulary [string length $vocabulary]
proc rand_str {N} {






#
proc repo_init {{filename ".rep.fossil"}} {
  if {$::env(HOME) ne [pwd]} {
    catch {exec $::fossilexe info} res
    if {![regexp {use --repository} $res]} {
      error "In an open checkout: cannot initialize a new repository here."
    }
    # Fossil will write data on $FOSSIL_HOME, running 'fossil new' here.
    # We need not to clutter the $HOME of the test caller.
    #
    set ::env(FOSSIL_HOME) [pwd]
    set ::env(HOME) [pwd]
  }
  catch {exec $::fossilexe close -f}
  file delete $filename
  exec $::fossilexe new $filename
  exec $::fossilexe open $filename
  exec $::fossilexe clean -f
  exec $::fossilexe set mtime-changes off
}

# This procedure only returns non-zero if the Tcl integration feature was
# enabled at compile-time and is now enabled at runtime.
proc is_tcl_usable_by_fossil {} {
  fossil test-th-eval "hasfeature tcl"
  if {$::RESULT ne "1"} {return 0}
  fossil test-th-eval "setting tcl"
  if {$::RESULT eq "1"} {return 1}
  fossil test-th-eval --open-config "setting tcl"
  if {$::RESULT eq "1"} {return 1}
  return [info exists ::env(TH1_ENABLE_TCL)]
}

# This procedure only returns non-zero if the TH1 hooks feature was enabled
# at compile-time and is now enabled at runtime.
proc are_th1_hooks_usable_by_fossil {} {
  fossil test-th-eval "hasfeature th1Hooks"
  if {$::RESULT ne "1"} {return 0}
  fossil test-th-eval "setting th1-hooks"
  if {$::RESULT eq "1"} {return 1}
  fossil test-th-eval --open-config "setting th1-hooks"
  if {$::RESULT eq "1"} {return 1}
  return [info exists ::env(TH1_ENABLE_HOOKS)]
}

# This (rarely used) procedure is designed to run a test within the Fossil
# source checkout (e.g. one that does NOT modify any state), while saving
# and restoring the current directory (e.g. one used when running a test
# file outside of the Fossil source checkout).  Please do NOT use this
# procedure unless you are absolutely sure it does not modify the state of
# the repository or source checkout in any way.
#
proc run_in_checkout { script {dir ""} } {
  if {[string length $dir] == 0} {set dir $::testfiledir}
  set savedPwd [pwd]; cd $dir
  set code [catch {
    uplevel 1 $script
  } result]
  cd $savedPwd; unset savedPwd
  return -code $code $result
}

# Normalize file status lists (like those returned by 'fossil changes')
# so they can be compared using simple string comparison
#
proc normalize_status_list {list} {
  set normalized [list]
  set matches [regexp -all -inline -line {^\s*([A-Z_]+:?)\x20+(\S.*)$} $list]
  foreach {_ status file} $matches {
    lappend normalized [list $status [string trim $file]]
  }
  set normalized [lsort -index 1 $normalized]
  return $normalized
}

# Perform a test comparing two status lists
#
proc test_status_list {name result expected {constraints ""}} {
  set expected [normalize_status_list $expected]
  set result [normalize_status_list $result]
  if {$result eq $expected} {
    test $name 1 $constraints
  } else {
    protOut "  Expected:\n    [join $expected "\n    "]" 1
    protOut "  Got:\n    [join $result "\n    "]" 1
    test $name 0 $constraints
  }
}

# Append all arguments into a single value and then returns it.
#
proc appendArgs {args} {
  eval append result $args
}

# Return the name of the versioned settings file containing the TH1
# setup script.
#
proc getTh1SetupFileName {} {
  #
  # NOTE: This uses the "testdir" global variable provided by the
  #       test suite; alternatively, the root of the source tree
  #       could be obtained directly from Fossil.
  #
  return [file normalize [file join .fossil-settings th1-setup]]

}

# Return the saved name of the versioned settings file containing
# the TH1 setup script.
#
proc getSavedTh1SetupFileName {} {
  return [appendArgs [getTh1SetupFileName] . [pid]]
}

# Sets the TH1 setup script to the one provided.  Prior to calling
# this, the [saveTh1SetupFile] procedure should be called in order to
# preserve the existing TH1 setup script.  Prior to completing the test,
# the [restoreTh1SetupFile] procedure should be called to restore the
# original TH1 setup script.
#
proc writeTh1SetupFile { data } {
  set fileName [getTh1SetupFileName]
  file mkdir [file dirname $fileName]
  return [write_file $fileName $data]
}

# Saves the TH1 setup script file by renaming it, based on the current
# process ID.
#
proc saveTh1SetupFile {} {
  set oldFileName [getTh1SetupFileName]
  if {[file exists $oldFileName]} {
    set newFileName [getSavedTh1SetupFileName]
    catch {file delete $newFileName}
    file rename $oldFileName $newFileName
  }
}

# Restores the original TH1 setup script file by renaming it back, based
# on the current process ID.
#
proc restoreTh1SetupFile {} {
  set oldFileName [getSavedTh1SetupFileName]
  set newFileName [getTh1SetupFileName]
  if {[file exists $oldFileName]} {
    catch {file delete $newFileName}
    file rename $oldFileName $newFileName
  } else {
    #
    # NOTE: There was no TH1 setup script file, delete the test one.
    #
    file delete $newFileName
  }
}

# Perform a test
#
set test_count 0
proc test {name expr {constraints ""}} {
  global bad_test ignored_test test_count RESULT
  incr test_count
  set knownBug [expr {"knownBug" in $constraints}]
  set r [uplevel 1 [list expr $expr]]
  if {$r} {
    if {$knownBug && !$::STRICT} {
      protOut "test $name OK (knownBug)?"
    } else {
      protOut "test $name OK"
    }
  } else {
    if {$knownBug && !$::STRICT} {
      protOut "test $name FAILED (knownBug)!" 1
      lappend ignored_test $name
    } else {
      protOut "test $name FAILED!" 1
      if {$::QUIET} {protOut "RESULT: $RESULT" 1}
      lappend bad_test $name
      if {$::HALT} exit
    }
  }
}
set bad_test {}
set ignored_test {}

# Return a random string N characters long.
#
set vocabulary 01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
append vocabulary "       ()*^!.eeeeeeeeaaaaattiioo   "
set nvocabulary [string length $vocabulary]
proc rand_str {N} {
  set outFileName [file join $::tempPath [appendArgs test-http-out- $suffix]]
  set data [subst [read_file $dataFileName]]

  write_file $inFileName $data
  fossil http $inFileName $outFileName 127.0.0.1 $repository --localauth
  set result [expr {[file exists $outFileName] ? [read_file $outFileName] : ""}]

  if {1} then {
    catch {file delete $inFileName}
    catch {file delete $outFileName}
  }

  return $result
}







  set outFileName [file join $::tempPath [appendArgs test-http-out- $suffix]]
  set data [subst [read_file $dataFileName]]

  write_file $inFileName $data
  fossil http $inFileName $outFileName 127.0.0.1 $repository --localauth
  set result [expr {[file exists $outFileName] ? [read_file $outFileName] : ""}]

  if {1} {
    catch {file delete $inFileName}
    catch {file delete $outFileName}
  }

  return $result
}

  return [lindex [split [normalize_result] \n] end-1]
}

# returns the third to last line of the normalized result.
proc third_to_last_data_line {} {
  return [lindex [split [normalize_result] \n] end-2]
}
protInit $fossilexe
foreach testfile $argv {
  set dir [file root [file tail $testfile]]
  file delete -force $dir
  file mkdir $dir
  set origwd [pwd]
  cd $dir
  protOut "***** $testfile ******"
  source $testdir/$testfile.test
  protOut "***** End of $testfile: [llength $bad_test] errors so far ******"
  cd $origwd
}
set nErr [llength $bad_test]

protOut "***** Final result: $nErr errors out of $test_count tests"

if {$nErr>0} {
  protOut "***** Failures: $bad_test"
}













>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>














>
|
>

|

>
>
>
>
>
>
>
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
  return [lindex [split [normalize_result] \n] end-1]
}

# returns the third to last line of the normalized result.
proc third_to_last_data_line {} {
  return [lindex [split [normalize_result] \n] end-2]
}

set tempPath [expr {[info exists env(TEMP)] ? \
    $env(TEMP) : [file dirname [info script]]}]

if {$tcl_platform(platform) eq "windows"} {
  set tempPath [string map [list \\ /] $tempPath]
}

set tempPath [file normalize $tempPath]

if {[catch {
  write_file [file join $tempPath temporary.txt] [clock seconds]
} error] != 0} {
  error "could not write file to directory \"$tempPath\",\
please set TEMP variable in environment: $error"
}

protInit $fossilexe
foreach testfile $argv {
  set dir [file root [file tail $testfile]]
  file delete -force $dir
  file mkdir $dir
  set origwd [pwd]
  cd $dir
  protOut "***** $testfile ******"
  source $testdir/$testfile.test
  protOut "***** End of $testfile: [llength $bad_test] errors so far ******"
  cd $origwd
}
set nErr [llength $bad_test]
if {$nErr>0 || !$::QUIET} {
  protOut "***** Final results: $nErr errors out of $test_count tests" 1
}
if {$nErr>0} {
  protOut "***** Considered failures: $bad_test" 1
}
set nErr [llength $ignored_test]
if {$nErr>0 || !$::QUIET} {
  protOut "***** Ignored results: $nErr ignored errors out of $test_count tests" 1
}
if {$nErr>0} {
  protOut "***** Ignored failures: $ignored_test" 1
}
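Given the -quiet and -strict options parsed above, a typical invocation of the updated harness (following the path convention from the file's own header comment; the chosen suites are only an example) might look like:

  # Run only the stash and revert suites, suppress per-test output, and
  # treat knownBug failures as real failures.
  tclsh ../test/tester.tcl ../bld/fossil -quiet -strict stash revert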
Changes to test/th1-docs.test.
###############################################################################

set env(TH1_ENABLE_DOCS) 1; # TH1 docs must be enabled for this test.
set env(TH1_ENABLE_TCL) 1; # Tcl integration must be enabled for this test.

###############################################################################


set data [fossil info]


regexp -line -- {^repository:   (.*)$} $data dummy repository

if {[string length $repository] == 0 || ![file exists $repository]} then {
  error "unable to locate repository"
}

set dataFileName [file join $::testdir th1-docs-input.txt]

###############################################################################


set RESULT [test_fossil_http \
    $repository $dataFileName /doc/trunk/test/fileStat.th1]


test th1-docs-1a {[regexp {<title>Fossil: test/fileStat.th1</title>} $RESULT]}
test th1-docs-1b {[regexp {>\[[0-9a-f]{40}\]<} $RESULT]}
test th1-docs-1c {[regexp { contains \d+ files\.} $RESULT]}

###############################################################################

set env(TH1_ENABLE_DOCS) 1; # TH1 docs must be enabled for this test.
set env(TH1_ENABLE_TCL) 1; # Tcl integration must be enabled for this test.

###############################################################################

run_in_checkout {
  set data [fossil info]
}

regexp -line -- {^repository:   (.*)$} $data dummy repository

if {[string length $repository] == 0 || ![file exists $repository]} then {
  error "unable to locate repository"
}

set dataFileName [file join $::testdir th1-docs-input.txt]

###############################################################################

run_in_checkout {
  set RESULT [test_fossil_http \
      $repository $dataFileName /doc/trunk/test/fileStat.th1]
}

test th1-docs-1a {[regexp {<title>Fossil: test/fileStat.th1</title>} $RESULT]}
test th1-docs-1b {[regexp {>\[[0-9a-f]{40}\]<} $RESULT]}
test th1-docs-1c {[regexp { contains \d+ files\.} $RESULT]}
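
run_in_checkout is a helper added to test/tester.tcl (outside the hunks shown here); judging from its use above, it evaluates a script from within the Fossil source checkout so that [fossil info] and test_fossil_http see that repository. A plausible sketch under that assumption, not the actual implementation:

proc run_in_checkout_sketch {script} {
  # Assumes the Fossil source checkout is the parent of the test directory.
  set saved [pwd]
  cd [file dirname $::testdir]
  set rc [catch {uplevel 1 $script} result]
  cd $saved
  return -code $rc $result
}
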
Changes to test/th1-hooks.test.
if {$::RESULT ne "1"} then {
  puts "Fossil was not compiled with TH1 hooks support."; return
}

###############################################################################






set env(TH1_ENABLE_HOOKS) 1; # TH1 hooks must be enabled for this test.

###############################################################################

set testTh1Setup {
  proc initialize_hook_log {} {
    if {![info exists ::hook_log]} {

if {$::RESULT ne "1"} then {
  puts "Fossil was not compiled with TH1 hooks support."; return
}

###############################################################################

repo_init
write_file f1 "f1"; fossil add f1; fossil commit -m "c1"

###############################################################################

set env(TH1_ENABLE_HOOKS) 1; # TH1 hooks must be enabled for this test.

###############################################################################

set testTh1Setup {
  proc initialize_hook_log {} {
    if {![info exists ::hook_log]} {
test th1-cmd-hooks-2a {[first_data_line] eq \
    {<h1><b>command_hook timeline</b></h1>}}

test th1-cmd-hooks-2b {[second_data_line] eq {ERROR: unsupported timeline}}

###############################################################################

fossil timeline now
test th1-cmd-hooks-3a {[first_data_line] eq \
    {<h1><b>command_hook timeline</b></h1>}}

test th1-cmd-hooks-3b \
    {[regexp -- {=== \d{4}-\d{2}-\d{2} ===} [second_data_line]]}

test th1-cmd-hooks-3c \


test th1-cmd-hooks-2a {[first_data_line] eq \
    {<h1><b>command_hook timeline</b></h1>}}

test th1-cmd-hooks-2b {[second_data_line] eq {ERROR: unsupported timeline}}

###############################################################################

fossil timeline -n -1 now
test th1-cmd-hooks-3a {[first_data_line] eq \
    {<h1><b>command_hook timeline</b></h1>}}

test th1-cmd-hooks-3b \
    {[regexp -- {=== \d{4}-\d{2}-\d{2} ===} [second_data_line]]}

test th1-cmd-hooks-3c \
fossil test4
test th1-custom-cmd-4a {[string trim $RESULT] eq \
    {<h1><b>command_hook test4</b></h1>}}

###############################################################################

set RESULT [test_fossil_http $repository $dataFileName /timeline]

test th1-web-hooks-1a {[regexp {<title>Fossil: Timeline</title>} $RESULT]}


test th1-web-hooks-1b {[regexp [appendArgs \
    {<h1><b>command_hook http webpage_hook timeline} " " \
    {webpage_notify timeline</b></h1>}] $RESULT]}

###############################################################################

fossil test4
test th1-custom-cmd-4a {[string trim $RESULT] eq \
    {<h1><b>command_hook test4</b></h1>}}

###############################################################################

set RESULT [test_fossil_http $repository $dataFileName /timeline]

test th1-web-hooks-1a {[regexp \
    {<title>Unnamed Fossil Project: Timeline</title>} $RESULT]}

test th1-web-hooks-1b {[regexp [appendArgs \
    {<h1><b>command_hook http webpage_hook timeline} " " \
    {webpage_notify timeline</b></h1>}] $RESULT]}

###############################################################################
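
The expected <title> above now carries the project name of the repository that the suite initializes for itself instead of "Fossil". An equivalent check that tolerates any project name is a plain Tcl regexp, shown for illustration only (not one of the check-in's tests):

test th1-web-hooks-1a-loose {[regexp {<title>[^<]*: Timeline</title>} $RESULT]}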

Added test/th1-repo.test.
#
# Copyright (c) 2011 D. Richard Hipp
# Copyright (c) 2015 Ch. Drexler
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Simplified BSD License (also
# known as the "2-Clause License" or "FreeBSD License".)
#
# This program is distributed in the hope that it will be useful,
# but without any warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose.
#
# Author contact information:
#   drh@hwaci.com
#   http://www.hwaci.com/drh/
#
#   Chris Drexler <ckolumbus@ac-drexler.de>
#
############################################################################
#
# TH1 tests that may modify the repository
#

catch {exec $::fossilexe info} res
if {![regexp {use --repository} $res]} {
  puts stderr "Cannot run this test within an open checkout"
  return
}

########################################
# Setup: Add Files and Commit          #
########################################

set rootDir [file normalize [pwd]]

repo_init

write_file f1.md  "f1"
write_file f2.md  "f2"
write_file f3.txt "f3"
write_file f4.md  "f4"

file mkdir [file join $rootDir subdirA]
# NOTE: There are no files in subdirA.

file mkdir [file join $rootDir subdirB]
write_file [file join $rootDir subdirB f5.md] "f5"
write_file [file join $rootDir subdirB f6.md] "f6"
write_file [file join $rootDir subdirB f7.txt] "f7"
write_file [file join $rootDir subdirB f8.md] "f8"
write_file [file join $rootDir subdirB f9.wiki] "f9"

file mkdir [file join $rootDir subdirC]
write_file [file join $rootDir subdirC f10.md] "f10"
write_file [file join $rootDir subdirC f11t.xt] "f11"

set files_md [list subdirB/f5.md subdirB/f6.md subdirB/f8.md subdirC/f10.md]

fossil add $rootDir
fossil commit -m "c1"

set dir [file dirname [info script]]

###############################################################################

fossil test-th-eval --open-config "dir trunk subdir*/*.md"
test th1-dir-1 {[llength $RESULT] eq [llength $files_md]}

set n 1
foreach i $RESULT j $files_md {
   test th1-dir-2.$n {$i eq $j}
   set n [expr {$n + 1}]
}

###############################################################################

set dateTime {\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}}
fossil test-th-eval --open-config "dir trunk subdir*/*.md 1"
test th1-dir-3.1 {[lindex [lindex $RESULT 0] 0] eq "subdirB/f5.md"}
test th1-dir-3.2 {[lindex [lindex $RESULT 0] 1] == 2}
test th1-dir-3.3 {[regexp -- $dateTime [lindex [lindex $RESULT 0] 2]]}
test th1-dir-3.4 {[lindex [lindex $RESULT 1] 0] eq "subdirB/f6.md"}
test th1-dir-3.5 {[lindex [lindex $RESULT 1] 1] == 2}
test th1-dir-3.6 {[regexp -- $dateTime [lindex [lindex $RESULT 1] 2]]}
test th1-dir-3.7 {[lindex [lindex $RESULT 2] 0] eq "subdirB/f8.md"}
test th1-dir-3.8 {[lindex [lindex $RESULT 2] 1] == 2}
test th1-dir-3.9 {[regexp -- $dateTime [lindex [lindex $RESULT 2] 2]]}
test th1-dir-3.10 {[lindex [lindex $RESULT 3] 0] eq "subdirC/f10.md"}
test th1-dir-3.11 {[lindex [lindex $RESULT 3] 1] == 3}
test th1-dir-3.12 {[regexp -- $dateTime [lindex [lindex $RESULT 3] 2]]}
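
The th1-dir-3.* assertions above imply that, with the details flag, each entry returned by the TH1 dir command is a {name size mtime} triple. A small consumer written under that assumption (it relies on tester.tcl's [fossil] and [protOut] helpers and on the repository this test sets up):

fossil test-th-eval --open-config "dir trunk subdir*/*.md 1"
foreach entry $RESULT {
  lassign $entry name size mtime
  protOut "dir entry: $name ($size bytes, modified $mtime)"
}
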
Changes to test/th1-tcl.test.
# TH1/Tcl integration
#

set dir [file dirname [info script]]

###############################################################################





fossil test-th-eval "hasfeature tcl"

if {$::RESULT ne "1"} then {
  puts "Fossil was not compiled with Tcl support."; return
}

###############################################################################


# TH1/Tcl integration
#

set dir [file dirname [info script]]

###############################################################################

repo_init

###############################################################################

fossil test-th-eval "hasfeature tcl"

if {$::RESULT ne "1"} then {
  puts "Fossil was not compiled with Tcl support."; return
}

###############################################################################
4
\d+
one_word
three words now$} [normalize_result]]}

###############################################################################


fossil test-th-render --open-config \
    [file nativename [file join $dir th1-tcl2.txt]]

test th1-tcl-2 {[regexp -- {^\d+$} [normalize_result]]}




###############################################################################

fossil test-th-render --open-config \
    [file nativename [file join $dir th1-tcl3.txt]]

test th1-tcl-3 {$RESULT eq {<hr><p class="thmainError">ERROR:\

4
\d+
one_word
three words now$} [normalize_result]]}

###############################################################################

if {[catch {package require sqlite3}] == 0} {
  fossil test-th-render --open-config \
      [file nativename [file join $dir th1-tcl2.txt]]

  test th1-tcl-2 {[regexp -- {^\d+$} [normalize_result]]}
} else {
  puts stderr "Skipping 'th1-tcl-2', SQLite package for Tcl not available"
}

###############################################################################

fossil test-th-render --open-config \
    [file nativename [file join $dir th1-tcl3.txt]]

test th1-tcl-3 {$RESULT eq {<hr><p class="thmainError">ERROR:\
Changes to test/th1.test.
#   http://www.hwaci.com/drh/
#
############################################################################
#
# TH1 Commands
#

set dir [file dirname [info script]]

###############################################################################

fossil test-th-eval --open-config "setting th1-hooks"
set th1Hooks [expr {$RESULT eq "1"}]

###############################################################################

fossil test-th-eval --open-config "setting abc"
test th1-setting-1 {$RESULT eq ""}

###############################################################################

#   http://www.hwaci.com/drh/
#
############################################################################
#
# TH1 Commands
#

set dir [file dirname [info script]]; repo_init

###############################################################################

set th1Tcl [is_tcl_usable_by_fossil]
set th1Hooks [are_th1_hooks_usable_by_fossil]

###############################################################################

fossil test-th-eval --open-config "setting abc"
test th1-setting-1 {$RESULT eq ""}

###############################################################################
###############################################################################

fossil test-th-eval "lindex list -0x"
test th1-expr-49 {$RESULT eq {TH_ERROR: expected integer, got: "-0x"}}

###############################################################################



fossil test-th-eval "checkout 1"; # NOTE: Assumes running "in tree".


test th1-checkout-1 {[string length $RESULT] > 0}

###############################################################################



fossil test-th-eval "checkout"; # NOTE: Assumes running "in tree".






test th1-checkout-2 {[string length $RESULT] > 0}

###############################################################################

set savedPwd [pwd]; cd /
fossil test-th-eval "checkout 1"
cd $savedPwd; unset savedPwd

###############################################################################

fossil test-th-eval "lindex list -0x"
test th1-expr-49 {$RESULT eq {TH_ERROR: expected integer, got: "-0x"}}

###############################################################################

run_in_checkout {
  # NOTE: The "1" here forces the checkout to be opened.
  fossil test-th-eval "checkout 1"
}

test th1-checkout-1 {[string length $RESULT] > 0}

###############################################################################

run_in_checkout {
  if {$th1Hooks} {
    fossil test-th-eval "checkout"
  } else {
    # NOTE: No TH1 hooks, force checkout to be populated.
    fossil test-th-eval --open-config "checkout"
  }
}

test th1-checkout-2 {[string length $RESULT] > 0}

###############################################################################

set savedPwd [pwd]; cd /
fossil test-th-eval "checkout 1"
cd $savedPwd; unset savedPwd
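
Several hunks in this file repeat the same dance: run inside the checkout and add --open-config only when TH1 hooks are unavailable. A hypothetical helper that factors the pattern out (it assumes $::th1Hooks is set as at the top of this file and stashes the script in a global so it does not depend on how run_in_checkout evaluates its body):

proc th_eval_in_checkout {script} {
  set ::th_eval_script $script
  run_in_checkout {
    if {$::th1Hooks} {
      fossil test-th-eval $::th_eval_script
    } else {
      # Without TH1 hooks, force the checkout/repository state to be opened.
      fossil test-th-eval --open-config $::th_eval_script
    }
  }
  return $::RESULT
}
# e.g. th_eval_in_checkout "globalState checkout"
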
###############################################################################

fossil test-th-eval "styleHeader {Page Title Here}"
test th1-header-1 {$RESULT eq {TH_ERROR: repository unavailable}}

###############################################################################


fossil test-th-eval --open-config "styleHeader {Page Title Here}"


test th1-header-2 {[regexp -- {<title>Fossil: Page Title Here</title>} $RESULT]}

###############################################################################

fossil test-th-eval "styleFooter"
test th1-footer-1 {$RESULT eq {TH_ERROR: repository unavailable}}


###############################################################################

fossil test-th-eval "styleHeader {Page Title Here}"
test th1-header-1 {$RESULT eq {TH_ERROR: repository unavailable}}

###############################################################################

run_in_checkout {
  fossil test-th-eval --open-config "styleHeader {Page Title Here}"
}

test th1-header-2 {[regexp -- {<title>Fossil: Page Title Here</title>} $RESULT]}

###############################################################################

fossil test-th-eval "styleFooter"
test th1-footer-1 {$RESULT eq {TH_ERROR: repository unavailable}}

###############################################################################

fossil test-th-eval "artifact tip"
test th1-artifact-2 {$RESULT eq {TH_ERROR: repository unavailable}}

###############################################################################


fossil test-th-eval --open-config "artifact tip"


test th1-artifact-3 {[regexp -- {F test/th1\.test [0-9a-f]{40}} $RESULT]}

###############################################################################

fossil test-th-eval "artifact 0000000000"
test th1-artifact-4 {$RESULT eq {TH_ERROR: repository unavailable}}

###############################################################################

fossil test-th-eval --open-config "artifact 0000000000"
test th1-artifact-5 {$RESULT eq {TH_ERROR: name not found}}

###############################################################################

fossil test-th-eval "artifact tip test/th1.test"
test th1-artifact-6 {$RESULT eq {TH_ERROR: repository unavailable}}

###############################################################################


fossil test-th-eval --open-config "artifact tip test/th1.test"


test th1-artifact-7 {[regexp -- {th1-artifact-7} $RESULT]}

###############################################################################

fossil test-th-eval "artifact 0000000000 test/th1.test"
test th1-artifact-8 {$RESULT eq {TH_ERROR: repository unavailable}}

###############################################################################

fossil test-th-eval --open-config "artifact 0000000000 test/th1.test"
test th1-artifact-9 {$RESULT eq {TH_ERROR: manifest not found}}

###############################################################################



fossil test-th-eval "globalState checkout"






test th1-globalState-1 {[string length $RESULT] > 0}

###############################################################################



fossil test-th-eval "globalState checkout"
test th1-globalState-2 {$RESULT eq [fossil test-th-eval checkout]}









###############################################################################

fossil test-th-eval "globalState configuration"
test th1-globalState-3 {[string length $RESULT] == 0}

###############################################################################

###############################################################################

fossil test-th-eval "artifact tip"
test th1-artifact-2 {$RESULT eq {TH_ERROR: repository unavailable}}

###############################################################################

run_in_checkout {
  fossil test-th-eval --open-config "artifact tip"
}

test th1-artifact-3 {[regexp -- {F test/th1\.test [0-9a-f]{40}} $RESULT]}

###############################################################################

fossil test-th-eval "artifact 0000000000"
test th1-artifact-4 {$RESULT eq {TH_ERROR: repository unavailable}}

###############################################################################

fossil test-th-eval --open-config "artifact 0000000000"
test th1-artifact-5 {$RESULT eq {TH_ERROR: name not found}}

###############################################################################

fossil test-th-eval "artifact tip test/th1.test"
test th1-artifact-6 {$RESULT eq {TH_ERROR: repository unavailable}}

###############################################################################

run_in_checkout {
  fossil test-th-eval --open-config "artifact tip test/th1.test"
}

test th1-artifact-7 {[regexp -- {th1-artifact-7} $RESULT]}

###############################################################################

fossil test-th-eval "artifact 0000000000 test/th1.test"
test th1-artifact-8 {$RESULT eq {TH_ERROR: repository unavailable}}

###############################################################################

fossil test-th-eval --open-config "artifact 0000000000 test/th1.test"
test th1-artifact-9 {$RESULT eq {TH_ERROR: manifest not found}}

###############################################################################

run_in_checkout {
  if {$th1Hooks} {
    fossil test-th-eval "globalState checkout"
  } else {
    # NOTE: No TH1 hooks, force checkout to be populated.
    fossil test-th-eval --open-config "globalState checkout"
  }
}

test th1-globalState-1 {[string length $RESULT] > 0}

###############################################################################

run_in_checkout {
  if {$th1Hooks} {
    fossil test-th-eval "globalState checkout"
    test th1-globalState-2 {$RESULT eq [fossil test-th-eval checkout]}
  } else {
    # NOTE: No TH1 hooks, force checkout to be populated.
    fossil test-th-eval --open-config "globalState checkout"

    test th1-globalState-2 {$RESULT eq \
        [fossil test-th-eval --open-config checkout]}
  }
}

###############################################################################

fossil test-th-eval "globalState configuration"
test th1-globalState-3 {[string length $RESULT] == 0}

###############################################################################
###############################################################################

fossil test-th-eval --errorlog foserrors.log "globalState log"
test th1-globalState-7 {$RESULT eq "foserrors.log"}

###############################################################################



fossil test-th-eval "globalState repository"






test th1-globalState-8 {[string length $RESULT] > 0}

###############################################################################



fossil test-th-eval "globalState repository"
test th1-globalState-9 {$RESULT eq [fossil test-th-eval repository]}









###############################################################################

fossil test-th-eval "globalState top"
test th1-globalState-10 {[string length $RESULT] == 0}

###############################################################################

###############################################################################

fossil test-th-eval --errorlog foserrors.log "globalState log"
test th1-globalState-7 {$RESULT eq "foserrors.log"}

###############################################################################

run_in_checkout {
  if {$th1Hooks} {
    fossil test-th-eval "globalState repository"
  } else {
    # NOTE: No TH1 hooks, force repository to be populated.
    fossil test-th-eval --open-config "globalState repository"
  }
}

test th1-globalState-8 {[string length $RESULT] > 0}

###############################################################################

run_in_checkout {
  if {$th1Hooks} {
    fossil test-th-eval "globalState repository"
    test th1-globalState-9 {$RESULT eq [fossil test-th-eval repository]}
  } else {
    # NOTE: No TH1 hooks, force repository to be populated.
    fossil test-th-eval --open-config "globalState repository"

    test th1-globalState-9 {$RESULT eq \
        [fossil test-th-eval --open-config repository]}
  }
}

###############################################################################

fossil test-th-eval "globalState top"
test th1-globalState-10 {[string length $RESULT] == 0}

###############################################################################
fossil test-th-eval "reinitialize 1; globalState configuration"
test th1-reinitialize-2 {$RESULT ne ""}

###############################################################################

#
# NOTE: This test may fail if the command names do not always come
#       out in a deterministic order from TH1.
#
fossil test-th-eval "info commands"
test th1-info-commands-1 {$RESULT eq {linecount htmlize date stime\
enable_output uplevel http expr glob_match utime styleFooter catch if\
tclReady searchable reinitialize combobox lindex query html anoncap randhex\
llength for set break regexp markdown styleHeader puts return checkout\
decorate artifact trace wiki proc hascap globalState continue getParameter\
hasfeature setting lsearch breakpoint upvar render repository string unset\
setParameter list error info rename anycap httpize}}

###############################################################################

fossil test-th-eval "info vars"





test th1-info-vars-1 {$RESULT eq ""}


###############################################################################

fossil test-th-eval "set x 1; info vars"


test th1-info-vars-2 {$RESULT eq "x"}





###############################################################################

fossil test-th-eval "set x 1; unset x; info vars"





test th1-info-vars-3 {$RESULT eq ""}


###############################################################################

fossil test-th-eval "proc foo {} {set x 1; info vars}; foo"
test th1-info-vars-4 {$RESULT eq "x"}

###############################################################################

fossil test-th-eval "set y 1; proc foo {} {set x 1; uplevel 1 {info vars}}; foo"


test th1-info-vars-5 {$RESULT eq "y"}

###############################################################################

fossil test-th-eval "lsearch"
test th1-lsearch-1 {$RESULT eq \
    {TH_ERROR: wrong # args: should be "lsearch list string"}}

fossil test-th-eval "reinitialize 1; globalState configuration"
test th1-reinitialize-2 {$RESULT ne ""}

###############################################################################

#
# NOTE: This test will fail if the command names are added to TH1, or
#       moved from Tcl builds to plain or the reverse. Sorting the 
#       command lists eliminates a dependence on order.
#
fossil test-th-eval "info commands"
set sorted_result [lsort $RESULT]
protOut "Sorted: $sorted_result"
set base_commands {anoncap anycap array artifact break breakpoint catch\
      checkout combobox continue date decorate dir enable_output encode64\
      error expr for getParameter glob_match globalState hascap hasfeature\
      html htmlize http httpize if info insertCsrf lindex linecount list\
      llength lsearch markdown proc puts query randhex redirect regexp\
      reinitialize rename render repository return searchable set\
      setParameter setting stime string styleFooter styleHeader tclReady\
      trace unset uplevel upvar utime verifyCsrf wiki}
set tcl_commands {tclEval tclExpr tclInvoke tclIsSafe tclMakeSafe}
if {$th1Tcl} {
  test th1-info-commands-1 {$sorted_result eq [lsort "$base_commands $tcl_commands"]}
} else {
  test th1-info-commands-1 {$sorted_result eq [lsort "$base_commands"]}
}


###############################################################################

fossil test-th-eval "info vars"

if {$th1Hooks} {
  test th1-info-vars-1 {[lsort $RESULT] eq \
      [lsort "th_stack_trace cmd_flags tcl_platform cmd_name cmd_args"]}
} else {
  test th1-info-vars-1 {$RESULT eq "tcl_platform"}
}

###############################################################################

fossil test-th-eval "set x 1; info vars"

if {$th1Hooks} {
  test th1-info-vars-2 {[lsort $RESULT] eq \
      [lsort "x th_stack_trace cmd_flags tcl_platform cmd_name cmd_args"]}
} else {
  test th1-info-vars-2 {[lsort $RESULT] eq [lsort "x tcl_platform"]}
}

###############################################################################

fossil test-th-eval "set x 1; unset x; info vars"

if {$th1Hooks} {
  test th1-info-vars-3 {[lsort $RESULT] eq \
      [lsort "th_stack_trace cmd_flags tcl_platform cmd_name cmd_args"]}
} else {
  test th1-info-vars-3 {$RESULT eq "tcl_platform"}
}

###############################################################################

fossil test-th-eval "proc foo {} {set x 1; info vars}; foo"
test th1-info-vars-4 {$RESULT eq "x"}

###############################################################################

fossil test-th-eval "set y 1; proc foo {} {set x 1; uplevel 1 {info vars}}; foo"

if {$th1Hooks} {
  test th1-info-vars-5 {[lsort $RESULT] eq \
      [lsort "th_stack_trace y cmd_flags tcl_platform cmd_name cmd_args"]}
} else {
  test th1-info-vars-5 {[lsort $RESULT] eq [lsort "y tcl_platform"]}
}

###############################################################################

fossil test-th-eval "array exists foo"
test th1-array-exists-1 {$RESULT eq "0"}

###############################################################################

fossil test-th-eval "set foo(x) 1; array exists foo"
test th1-array-exists-2 {$RESULT eq "1"}

###############################################################################

fossil test-th-eval "set foo(x) 1; unset foo(x); array exists foo"
test th1-array-exists-3 {$RESULT eq "1"}

###############################################################################

fossil test-th-eval "set foo(x) 1; unset foo; array exists foo"
test th1-array-exists-4 {$RESULT eq "0"}

###############################################################################

fossil test-th-eval "set foo 1; array exists foo"
test th1-array-exists-5 {$RESULT eq "0"}

###############################################################################

fossil test-th-eval "array names foo"
test th1-array-names-1 {$RESULT eq ""}

###############################################################################

fossil test-th-eval "set foo 2; array names foo"
test th1-array-names-2 {$RESULT eq ""}

###############################################################################

fossil test-th-eval "set foo 2; unset foo; set foo(x) 2; array names foo"
test th1-array-names-3 {$RESULT eq "x"}

###############################################################################

fossil test-th-eval "set foo(x) 2; array names foo"
test th1-array-names-4 {$RESULT eq "x"}

###############################################################################

fossil test-th-eval "set foo(x) 2; set foo(y) 2; array names foo"
test th1-array-names-5 {$RESULT eq "x y"}

###############################################################################

fossil test-th-eval "set foo(x) 2; unset foo(x); array names foo"
test th1-array-names-6 {$RESULT eq ""}

###############################################################################

fossil test-th-eval "lsearch"
test th1-lsearch-1 {$RESULT eq \
    {TH_ERROR: wrong # args: should be "lsearch list string"}}

}}}

###############################################################################

fossil test-th-eval {markdown "Test1\n=====\n*This is a test.*"}
test th1-markdown-4 {[normalize_result] eq {Test1 {<div class="markdown">

<h1>Test1</h1>
<p><em>This is a test.</em></p>

</div>
}}}

###############################################################################

set markdown [read_file [file join $dir markdown-test1.md]]
fossil test-th-eval [string map \
    [list %markdown% $markdown] {markdown {%markdown%}}]
test th1-markdown-5 {[normalize_result] eq \
{{Markdown Formatter Test Document} {<div class="markdown">

<h1>Markdown Formatter Test Document</h1>
<p>This document is designed to test the markdown formatter.</p>

<ul>
<li>A bullet item.

<ul>
<li>A subitem</li>

}}}

###############################################################################

fossil test-th-eval {markdown "Test1\n=====\n*This is a test.*"}
test th1-markdown-4 {[normalize_result] eq {Test1 {<div class="markdown">


<p><em>This is a test.</em></p>

</div>
}}}

###############################################################################

set markdown [read_file [file join $dir markdown-test1.md]]
fossil test-th-eval [string map \
    [list %markdown% $markdown] {markdown {%markdown%}}]
test th1-markdown-5 {[normalize_result] eq \
{{Markdown Formatter Test Document} {<div class="markdown">


<p>This document is designed to test the markdown formatter.</p>

<ul>
<li>A bullet item.

<ul>
<li>A subitem</li>

<p>Another paragraph.</p>

<h2>Other Features</h2>
<p>Text can show <em>emphasis</em> or <em>emphasis</em> or <strong>strong emphassis</strong>.</p>

</div>
}}}

<p>Another paragraph.</p>

<h2>Other Features</h2>
<p>Text can show <em>emphasis</em> or <em>emphasis</em> or <strong>strong emphassis</strong>.</p>

</div>
}}}

###############################################################################

fossil test-th-eval {encode64 test}
test th1-encode64-1 {$RESULT eq "dGVzdA=="}

###############################################################################

fossil test-th-eval {encode64 test\x00}
test th1-encode64-2 {$RESULT eq "dGVzdAA="}

###############################################################################

#
# TODO: Modify the result of this test if the source file (i.e.
#       "ajax/cgi-bin/fossil-json.cgi.example") changes.
#
run_in_checkout {
  fossil test-th-eval --open-config \
      {encode64 [artifact trunk ajax/cgi-bin/fossil-json.cgi.example]}
}

test th1-encode64-3 {$RESULT eq \
"IyEvcGF0aC90by9mb3NzaWwvYmluYXJ5CnJlcG9zaXRvcnk6IC9wYXRoL3RvL3JlcG8uZnNsCg=="}

###############################################################################

fossil test-th-eval {array exists tcl_platform}
test th1-platform-1 {$RESULT eq "1"}

###############################################################################

fossil test-th-eval {array names tcl_platform}
test th1-platform-2 {$RESULT eq "engine platform"}

###############################################################################

fossil test-th-eval {set tcl_platform(engine)}
test th1-platform-3 {$RESULT eq "TH1"}

###############################################################################

fossil test-th-eval {set tcl_platform(platform)}
test th1-platform-4 {$RESULT eq "windows" || $RESULT eq "unix"}

###############################################################################

set th1FileName [file join $::tempPath th1-[pid].th1]

write_file $th1FileName {
  set x ""
  for {set i 0} {$i < 10} {set i [expr {$i + 1}]} {
    set x "$x $i"
  }
  return [string trim $x]
  set y; # NOTE: Never hit.
}

fossil test-th-source $th1FileName
test th1-source-1 {$RESULT eq {TH_RETURN: 0 1 2 3 4 5 6 7 8 9}}
file delete $th1FileName
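
The encode64 expectations above are fixed strings; under Tcl 8.6 the same values can be cross-checked against Tcl's own base64 encoder. A sketch, not part of the check-in:

fossil test-th-eval {encode64 test}
test th1-encode64-crosscheck {$RESULT eq [binary encode base64 test]}
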
Changes to test/utf.test.
    set result [string map [list %TEMP% $tempPath \r\n \n] $result]
    # if {$::RESULT ne $result} {puts stdout $::RESULT}
    test utf-check-$testname.$i {$::RESULT eq $result}
    incr i
  }
}


array set enc [list     \
      0 binary          \
      1 binary          \
      2 unicode         \
      3 unicode-reverse \
]


array set bom [list                                         \
      0 ""                                                  \
      1 \xEF\xBB\xBF                                        \
      2 [expr {$tcl_platform(byteOrder) eq "littleEndian" ? \
            "\xFF\xFE" : "\xFE\xFF"}]                       \
      3 [expr {$tcl_platform(byteOrder) eq "littleEndian" ? \
            "\xFE\xFF" : "\xFF\xFE"}]                       \
]


array set data [list                          \
      0 ""                                    \
      1 \r                                    \
      2 \n                                    \
      3 \r\n                                  \
      4 \rA                                   \
      5 \rAB                                  \

    set result [string map [list %TEMP% $tempPath \r\n \n] $result]
    # if {$::RESULT ne $result} {puts stdout $::RESULT}
    test utf-check-$testname.$i {$::RESULT eq $result}
    incr i
  }
}
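
The unset -nocomplain guards added just below are presumably there because array set merges into an existing array rather than replacing it, so elements left over from an earlier pass would survive. A short illustration:

array set demo {0 old 9 stale}
array set demo {0 new 1 fresh}
# demo now holds 0/new and 1/fresh plus the leftover 9/stale element.
unset -nocomplain demo
array set demo {0 new 1 fresh}
# demo now holds exactly 0/new and 1/fresh.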

unset -nocomplain enc
array set enc [list     \
      0 binary          \
      1 binary          \
      2 unicode         \
      3 unicode-reverse \
]

unset -nocomplain bom
array set bom [list                                         \
      0 ""                                                  \
      1 \xEF\xBB\xBF                                        \
      2 [expr {$tcl_platform(byteOrder) eq "littleEndian" ? \
            "\xFF\xFE" : "\xFE\xFF"}]                       \
      3 [expr {$tcl_platform(byteOrder) eq "littleEndian" ? \
            "\xFE\xFF" : "\xFF\xFE"}]                       \
]

unset -nocomplain data
array set data [list                          \
      0 ""                                    \
      1 \r                                    \
      2 \n                                    \
      3 \r\n                                  \
      4 \rA                                   \
      5 \rAB                                  \
    176 \xF4\x8F\xBF\xBF\r\n                  \
    177 \xF4\x90\x80\x80\x00                  \
    178 \xF4\x90\x80\x80\r                    \
    179 \xF4\x90\x80\x80\n                    \
    180 \xF4\x90\x80\x80\r\n                  \
]


array set extraData [list                     \
      0 ""                                    \
      1 Z                                     \
]

proc deleteTestFiles {path num} {
  set fn $num


    176 \xF4\x8F\xBF\xBF\r\n                  \
    177 \xF4\x90\x80\x80\x00                  \
    178 \xF4\x90\x80\x80\r                    \
    179 \xF4\x90\x80\x80\n                    \
    180 \xF4\x90\x80\x80\r\n                  \
]

unset -nocomplain extraData
array set extraData [list                     \
      0 ""                                    \
      1 Z                                     \
]

proc deleteTestFiles {path num} {
  set fn $num
Changes to tools/cvs2fossil/changeset.
# individuals.  For exact contribution history, see the revision
# history and logs, available at http://fossil-scm.hwaci.com/fossil
# # ## ### ##### ######## ############# #####################

## Helper application, debugging of cvs2fossil. This application
## extracts all information about a changeset and writes it nicely
## formatted to stdout. The changeset is specified by its internal
## numerical id. 

# # ## ### ##### ######## ############# #####################
## Requirements, extended package management for local packages.

lappend auto_path [file join [file dirname [info script]] lib]

package require Tcl 8.4                               ; # Required runtime.

# individuals.  For exact contribution history, see the revision
# history and logs, available at http://fossil-scm.hwaci.com/fossil
# # ## ### ##### ######## ############# #####################

## Helper application, debugging of cvs2fossil. This application
## extracts all information about a changeset and writes it nicely
## formatted to stdout. The changeset is specified by its internal
## numerical id.

# # ## ### ##### ######## ############# #####################
## Requirements, extended package management for local packages.

lappend auto_path [file join [file dirname [info script]] lib]

package require Tcl 8.4                               ; # Required runtime.
Changes to tools/cvs2fossil/lib/c2f_pbreakacycle.tcl.
	foreach item [array names limits] {
	    set mins $minsa($item)
	    set maxp $maxp($item)
	    # Note that for the min successor position "" represents
	    # +infinity
	    integrity assert {
		($mins eq "") || ($maxp < $mins) 
	    } {Item <$item> is backward at file level ($maxp >= $mins)}
	}

	# Save the limits for the splitter, and compute the border at
	# which to split as the minimum of all minimal successor
	# positions.

	foreach item [array names limits] {
	    set mins $minsa($item)
	    set maxp $maxp($item)
	    # Note that for the min successor position "" represents
	    # +infinity
	    integrity assert {
		($mins eq "") || ($maxp < $mins)
	    } {Item <$item> is backward at file level ($maxp >= $mins)}
	}

	# Save the limits for the splitter, and compute the border at
	# which to split as the minimum of all minimal successor
	# positions.

Changes to tools/cvs2fossil/lib/c2f_prev.tcl.
    # List of all known changesets of a type.
    typevariable mytchangesets -array {
	sym::branch {}
	sym::tag    {}
	rev         {}
    }
					
    typevariable myitemmap     -array {} ; # Map from items (tagged)
					   # to the list of changesets
					   # containing it. Each item
					   # can be used by only one
					   # changeset.
    typevariable myidmap   -array {} ; # Map from changeset id to
				       # changeset.


    # List of all known changesets of a type.
    typevariable mytchangesets -array {
	sym::branch {}
	sym::tag    {}
	rev         {}
    }

    typevariable myitemmap     -array {} ; # Map from items (tagged)
					   # to the list of changesets
					   # containing it. Each item
					   # can be used by only one
					   # changeset.
    typevariable myidmap   -array {} ; # Map from changeset id to
				       # changeset.
Changes to tools/cvs2fossil/lib/mem.tcl.
	variable lcba
	variable lmba
	variable mid

	struct::list assign [minfo] _ _ _ cba _ mba

	set dc [expr $cba - $lcba] ; set lcba $cba	
	set dm [expr $mba - $lmba] ; set lmba $mba	

	# projection: 1          2 3          4 5         6 7          6 8         10
	return "[F [incr mid]] | [F $cba] | [F $dc] | [F $mba] | [F $dm] |=| "
    }

    proc mark {} {
	variable track ; if {!$track} return


	variable lcba
	variable lmba
	variable mid

	struct::list assign [minfo] _ _ _ cba _ mba

	set dc [expr $cba - $lcba] ; set lcba $cba
	set dm [expr $mba - $lmba] ; set lmba $mba

	# projection: 1          2 3          4 5         6 7          6 8         10
	return "[F [incr mid]] | [F $cba] | [F $dc] | [F $mba] | [F $dm] |=| "
    }

    proc mark {} {
	variable track ; if {!$track} return
Changes to tools/fossilwiki.
	while ( $text =~ m/\[([^][]+)\]/g )
	{
		push @links,$1;
	}

	$numlinks = $#links;

	if (@links == ()) 
	{
		push @terminals, $page;
	}
	else
	{
		my @internals = grep { $_ !~ /(http:)|(mailto:)|(https:)/ } @links;
		if (@internals == ()) 
		{
			push @nointernals, $page;
		}
		else
		{
			@{$links{$page}{'links'}} = map {my ($a,$b) = split /\|/; $a;} @internals;
			foreach $internal ( @internals )


	while ( $text =~ m/\[([^][]+)\]/g )
	{
		push @links,$1;
	}

	$numlinks = $#links;

	if (@links == ())
	{
		push @terminals, $page;
	}
	else
	{
		my @internals = grep { $_ !~ /(http:)|(mailto:)|(https:)/ } @links;
		if (@internals == ())
		{
			push @nointernals, $page;
		}
		else
		{
			@{$links{$page}{'links'}} = map {my ($a,$b) = split /\|/; $a;} @internals;
			foreach $internal ( @internals )
foreach $link ( keys %badlinks )
{
	print ("badlink: '$link'\n");
}
foreach $page ( sort keys %links )
{
	my @links = @{$links{$page}{'links'}};
	if (@links != ()) 
	{
		if ($page eq $mainpage)
		{
			print "links: *** '$page' *** -> ", join (", ", @links), "\n";
		}
		else
		{
			print "links: '$page' -> ", join (", ", @links), "\n";
		}
	}
}

foreach $link ( keys %badlinks )
{
	print ("badlink: '$link'\n");
}
foreach $page ( sort keys %links )
{
	my @links = @{$links{$page}{'links'}};
	if (@links != ())
	{
		if ($page eq $mainpage)
		{
			print "links: *** '$page' *** -> ", join (", ", @links), "\n";
		}
		else
		{
			print "links: '$page' -> ", join (", ", @links), "\n";
		}
	}
}
Changes to win/Makefile.PellesCGMake.
UTILS_OBJ=$(UTILS:.exe=.obj)
UTILS_SRC=$(foreach uf,$(UTILS),$(SRCDIR)$(uf:.exe=.c))

# define the SQLite files, which need special flags on compile
SQLITESRC=sqlite3.c
ORIGSQLITESRC=$(foreach sf,$(SQLITESRC),$(SRCDIR)$(sf))
SQLITEOBJ=$(foreach sf,$(SQLITESRC),$(sf:.c=.obj))
SQLITEDEFINES=-DNDEBUG=1 -DSQLITE_OMIT_LOAD_EXTENSION=1 -DSQLITE_ENABLE_LOCKING_STYLE=0 -DSQLITE_THREADSAFE=0 -DSQLITE_DEFAULT_FILE_FORMAT=4 -DSQLITE_OMIT_DEPRECATED -DSQLITE_ENABLE_EXPLAIN_COMMENTS -DSQLITE_ENABLE_FTS4 -DSQLITE_ENABLE_FTS3_PARENTHESIS -DSQLITE_ENABLE_DBSTAT_VTAB -DSQLITE_WIN32_NO_ANSI

# define the SQLite shell files, which need special flags on compile
SQLITESHELLSRC=shell.c
ORIGSQLITESHELLSRC=$(foreach sf,$(SQLITESHELLSRC),$(SRCDIR)$(sf))
SQLITESHELLOBJ=$(foreach sf,$(SQLITESHELLSRC),$(sf:.c=.obj))
SQLITESHELLDEFINES=-Dmain=sqlite3_shell -DSQLITE_OMIT_LOAD_EXTENSION=1 -DUSE_SYSTEM_SQLITE=$(USE_SYSTEM_SQLITE) -DSQLITE_SHELL_DBNAME_PROC=fossil_open -Daccess=file_access -Dsystem=fossil_system -Dgetenv=fossil_getenv -Dfopen=fossil_fopen

UTILS_OBJ=$(UTILS:.exe=.obj)
UTILS_SRC=$(foreach uf,$(UTILS),$(SRCDIR)$(uf:.exe=.c))

# define the SQLite files, which need special flags on compile
SQLITESRC=sqlite3.c
ORIGSQLITESRC=$(foreach sf,$(SQLITESRC),$(SRCDIR)$(sf))
SQLITEOBJ=$(foreach sf,$(SQLITESRC),$(sf:.c=.obj))
SQLITEDEFINES=-DNDEBUG=1 -DSQLITE_OMIT_LOAD_EXTENSION=1 -DSQLITE_ENABLE_LOCKING_STYLE=0 -DSQLITE_THREADSAFE=0 -DSQLITE_DEFAULT_FILE_FORMAT=4 -DSQLITE_OMIT_DEPRECATED -DSQLITE_ENABLE_EXPLAIN_COMMENTS -DSQLITE_ENABLE_FTS4 -DSQLITE_ENABLE_FTS3_PARENTHESIS -DSQLITE_ENABLE_DBSTAT_VTAB -DSQLITE_ENABLE_JSON1 -DSQLITE_ENABLE_FTS5 -DSQLITE_WIN32_NO_ANSI

# define the SQLite shell files, which need special flags on compile
SQLITESHELLSRC=shell.c
ORIGSQLITESHELLSRC=$(foreach sf,$(SQLITESHELLSRC),$(SRCDIR)$(sf))
SQLITESHELLOBJ=$(foreach sf,$(SQLITESHELLSRC),$(sf:.c=.obj))
SQLITESHELLDEFINES=-Dmain=sqlite3_shell -DSQLITE_OMIT_LOAD_EXTENSION=1 -DUSE_SYSTEM_SQLITE=$(USE_SYSTEM_SQLITE) -DSQLITE_SHELL_DBNAME_PROC=fossil_open -Daccess=file_access -Dsystem=fossil_system -Dgetenv=fossil_getenv -Dfopen=fossil_fopen

Changes to win/Makefile.dmc.
SSL    =

CFLAGS = -o
BCC    = $(DMDIR)\bin\dmc $(CFLAGS)
TCC    = $(DMDIR)\bin\dmc $(CFLAGS) $(DMCDEF) $(SSL) $(INCL)
LIBS   = $(DMDIR)\extra\lib\ zlib wsock32 advapi32

SQLITE_OPTIONS = -DNDEBUG=1 -DSQLITE_OMIT_LOAD_EXTENSION=1 -DSQLITE_ENABLE_LOCKING_STYLE=0 -DSQLITE_THREADSAFE=0 -DSQLITE_DEFAULT_FILE_FORMAT=4 -DSQLITE_OMIT_DEPRECATED -DSQLITE_ENABLE_EXPLAIN_COMMENTS -DSQLITE_ENABLE_FTS4 -DSQLITE_ENABLE_FTS3_PARENTHESIS -DSQLITE_ENABLE_DBSTAT_VTAB

SHELL_OPTIONS = -Dmain=sqlite3_shell -DSQLITE_OMIT_LOAD_EXTENSION=1 -DUSE_SYSTEM_SQLITE=$(USE_SYSTEM_SQLITE) -DSQLITE_SHELL_DBNAME_PROC=fossil_open -Daccess=file_access -Dsystem=fossil_system -Dgetenv=fossil_getenv -Dfopen=fossil_fopen

SRC   = add_.c allrepo_.c attach_.c bag_.c bisect_.c blob_.c branch_.c browse_.c builtin_.c bundle_.c cache_.c captcha_.c cgi_.c checkin_.c checkout_.c clearsign_.c clone_.c comformat_.c configure_.c content_.c db_.c delta_.c deltacmd_.c descendants_.c diff_.c diffcmd_.c doc_.c encode_.c event_.c export_.c file_.c finfo_.c foci_.c fusefs_.c glob_.c graph_.c gzip_.c http_.c http_socket_.c http_ssl_.c http_transport_.c import_.c info_.c json_.c json_artifact_.c json_branch_.c json_config_.c json_diff_.c json_dir_.c json_finfo_.c json_login_.c json_query_.c json_report_.c json_status_.c json_tag_.c json_timeline_.c json_user_.c json_wiki_.c leaf_.c loadctrl_.c login_.c lookslike_.c main_.c manifest_.c markdown_.c markdown_html_.c md5_.c merge_.c merge3_.c moderate_.c name_.c path_.c piechart_.c pivot_.c popen_.c pqueue_.c printf_.c publish_.c purge_.c rebuild_.c regexp_.c report_.c rss_.c schema_.c search_.c setup_.c sha1_.c shun_.c sitemap_.c skins_.c sqlcmd_.c stash_.c stat_.c statrep_.c style_.c sync_.c tag_.c tar_.c th_main_.c timeline_.c tkt_.c tktsetup_.c undo_.c unicode_.c update_.c url_.c user_.c utf8_.c util_.c verify_.c vfile_.c wiki_.c wikiformat_.c winfile_.c winhttp_.c wysiwyg_.c xfer_.c xfersetup_.c zip_.c 

OBJ   = $(OBJDIR)\add$O $(OBJDIR)\allrepo$O $(OBJDIR)\attach$O $(OBJDIR)\bag$O $(OBJDIR)\bisect$O $(OBJDIR)\blob$O $(OBJDIR)\branch$O $(OBJDIR)\browse$O $(OBJDIR)\builtin$O $(OBJDIR)\bundle$O $(OBJDIR)\cache$O $(OBJDIR)\captcha$O $(OBJDIR)\cgi$O $(OBJDIR)\checkin$O $(OBJDIR)\checkout$O $(OBJDIR)\clearsign$O $(OBJDIR)\clone$O $(OBJDIR)\comformat$O $(OBJDIR)\configure$O $(OBJDIR)\content$O $(OBJDIR)\db$O $(OBJDIR)\delta$O $(OBJDIR)\deltacmd$O $(OBJDIR)\descendants$O $(OBJDIR)\diff$O $(OBJDIR)\diffcmd$O $(OBJDIR)\doc$O $(OBJDIR)\encode$O $(OBJDIR)\event$O $(OBJDIR)\export$O $(OBJDIR)\file$O $(OBJDIR)\finfo$O $(OBJDIR)\foci$O $(OBJDIR)\fusefs$O $(OBJDIR)\glob$O $(OBJDIR)\graph$O $(OBJDIR)\gzip$O $(OBJDIR)\http$O $(OBJDIR)\http_socket$O $(OBJDIR)\http_ssl$O $(OBJDIR)\http_transport$O $(OBJDIR)\import$O $(OBJDIR)\info$O $(OBJDIR)\json$O $(OBJDIR)\json_artifact$O $(OBJDIR)\json_branch$O $(OBJDIR)\json_config$O $(OBJDIR)\json_diff$O $(OBJDIR)\json_dir$O $(OBJDIR)\json_finfo$O $(OBJDIR)\json_login$O $(OBJDIR)\json_query$O $(OBJDIR)\json_report$O $(OBJDIR)\json_status$O $(OBJDIR)\json_tag$O $(OBJDIR)\json_timeline$O $(OBJDIR)\json_user$O $(OBJDIR)\json_wiki$O $(OBJDIR)\leaf$O $(OBJDIR)\loadctrl$O $(OBJDIR)\login$O $(OBJDIR)\lookslike$O $(OBJDIR)\main$O $(OBJDIR)\manifest$O $(OBJDIR)\markdown$O $(OBJDIR)\markdown_html$O $(OBJDIR)\md5$O $(OBJDIR)\merge$O $(OBJDIR)\merge3$O $(OBJDIR)\moderate$O $(OBJDIR)\name$O $(OBJDIR)\path$O $(OBJDIR)\piechart$O $(OBJDIR)\pivot$O $(OBJDIR)\popen$O $(OBJDIR)\pqueue$O $(OBJDIR)\printf$O $(OBJDIR)\publish$O $(OBJDIR)\purge$O $(OBJDIR)\rebuild$O $(OBJDIR)\regexp$O $(OBJDIR)\report$O $(OBJDIR)\rss$O $(OBJDIR)\schema$O $(OBJDIR)\search$O $(OBJDIR)\setup$O $(OBJDIR)\sha1$O $(OBJDIR)\shun$O $(OBJDIR)\sitemap$O $(OBJDIR)\skins$O $(OBJDIR)\sqlcmd$O $(OBJDIR)\stash$O $(OBJDIR)\stat$O $(OBJDIR)\statrep$O $(OBJDIR)\style$O $(OBJDIR)\sync$O $(OBJDIR)\tag$O $(OBJDIR)\tar$O $(OBJDIR)\th_main$O $(OBJDIR)\timeline$O $(OBJDIR)\tkt$O $(OBJDIR)\tktsetup$O $(OBJDIR)\undo$O $(OBJDIR)\unicode$O $(OBJDIR)\update$O $(OBJDIR)\url$O $(OBJDIR)\user$O $(OBJDIR)\utf8$O $(OBJDIR)\util$O $(OBJDIR)\verify$O $(OBJDIR)\vfile$O $(OBJDIR)\wiki$O $(OBJDIR)\wikiformat$O $(OBJDIR)\winfile$O $(OBJDIR)\winhttp$O $(OBJDIR)\wysiwyg$O $(OBJDIR)\xfer$O $(OBJDIR)\xfersetup$O $(OBJDIR)\zip$O $(OBJDIR)\shell$O $(OBJDIR)\sqlite3$O $(OBJDIR)\th$O $(OBJDIR)\th_lang$O 


SSL    =

CFLAGS = -o
BCC    = $(DMDIR)\bin\dmc $(CFLAGS)
TCC    = $(DMDIR)\bin\dmc $(CFLAGS) $(DMCDEF) $(SSL) $(INCL)
LIBS   = $(DMDIR)\extra\lib\ zlib wsock32 advapi32

SQLITE_OPTIONS = -DNDEBUG=1 -DSQLITE_OMIT_LOAD_EXTENSION=1 -DSQLITE_ENABLE_LOCKING_STYLE=0 -DSQLITE_THREADSAFE=0 -DSQLITE_DEFAULT_FILE_FORMAT=4 -DSQLITE_OMIT_DEPRECATED -DSQLITE_ENABLE_EXPLAIN_COMMENTS -DSQLITE_ENABLE_FTS4 -DSQLITE_ENABLE_FTS3_PARENTHESIS -DSQLITE_ENABLE_DBSTAT_VTAB -DSQLITE_ENABLE_JSON1 -DSQLITE_ENABLE_FTS5

SHELL_OPTIONS = -Dmain=sqlite3_shell -DSQLITE_OMIT_LOAD_EXTENSION=1 -DUSE_SYSTEM_SQLITE=$(USE_SYSTEM_SQLITE) -DSQLITE_SHELL_DBNAME_PROC=fossil_open -Daccess=file_access -Dsystem=fossil_system -Dgetenv=fossil_getenv -Dfopen=fossil_fopen

SRC   = add_.c allrepo_.c attach_.c bag_.c bisect_.c blob_.c branch_.c browse_.c builtin_.c bundle_.c cache_.c captcha_.c cgi_.c checkin_.c checkout_.c clearsign_.c clone_.c comformat_.c configure_.c content_.c db_.c delta_.c deltacmd_.c descendants_.c diff_.c diffcmd_.c doc_.c encode_.c event_.c export_.c file_.c finfo_.c foci_.c fusefs_.c glob_.c graph_.c gzip_.c http_.c http_socket_.c http_ssl_.c http_transport_.c import_.c info_.c json_.c json_artifact_.c json_branch_.c json_config_.c json_diff_.c json_dir_.c json_finfo_.c json_login_.c json_query_.c json_report_.c json_status_.c json_tag_.c json_timeline_.c json_user_.c json_wiki_.c leaf_.c loadctrl_.c login_.c lookslike_.c main_.c manifest_.c markdown_.c markdown_html_.c md5_.c merge_.c merge3_.c moderate_.c name_.c path_.c piechart_.c pivot_.c popen_.c pqueue_.c printf_.c publish_.c purge_.c rebuild_.c regexp_.c report_.c rss_.c schema_.c search_.c setup_.c sha1_.c shun_.c sitemap_.c skins_.c sqlcmd_.c stash_.c stat_.c statrep_.c style_.c sync_.c tag_.c tar_.c th_main_.c timeline_.c tkt_.c tktsetup_.c undo_.c unicode_.c update_.c url_.c user_.c utf8_.c util_.c verify_.c vfile_.c wiki_.c wikiformat_.c winfile_.c winhttp_.c wysiwyg_.c xfer_.c xfersetup_.c zip_.c 

OBJ   = $(OBJDIR)\add$O $(OBJDIR)\allrepo$O $(OBJDIR)\attach$O $(OBJDIR)\bag$O $(OBJDIR)\bisect$O $(OBJDIR)\blob$O $(OBJDIR)\branch$O $(OBJDIR)\browse$O $(OBJDIR)\builtin$O $(OBJDIR)\bundle$O $(OBJDIR)\cache$O $(OBJDIR)\captcha$O $(OBJDIR)\cgi$O $(OBJDIR)\checkin$O $(OBJDIR)\checkout$O $(OBJDIR)\clearsign$O $(OBJDIR)\clone$O $(OBJDIR)\comformat$O $(OBJDIR)\configure$O $(OBJDIR)\content$O $(OBJDIR)\db$O $(OBJDIR)\delta$O $(OBJDIR)\deltacmd$O $(OBJDIR)\descendants$O $(OBJDIR)\diff$O $(OBJDIR)\diffcmd$O $(OBJDIR)\doc$O $(OBJDIR)\encode$O $(OBJDIR)\event$O $(OBJDIR)\export$O $(OBJDIR)\file$O $(OBJDIR)\finfo$O $(OBJDIR)\foci$O $(OBJDIR)\fusefs$O $(OBJDIR)\glob$O $(OBJDIR)\graph$O $(OBJDIR)\gzip$O $(OBJDIR)\http$O $(OBJDIR)\http_socket$O $(OBJDIR)\http_ssl$O $(OBJDIR)\http_transport$O $(OBJDIR)\import$O $(OBJDIR)\info$O $(OBJDIR)\json$O $(OBJDIR)\json_artifact$O $(OBJDIR)\json_branch$O $(OBJDIR)\json_config$O $(OBJDIR)\json_diff$O $(OBJDIR)\json_dir$O $(OBJDIR)\json_finfo$O $(OBJDIR)\json_login$O $(OBJDIR)\json_query$O $(OBJDIR)\json_report$O $(OBJDIR)\json_status$O $(OBJDIR)\json_tag$O $(OBJDIR)\json_timeline$O $(OBJDIR)\json_user$O $(OBJDIR)\json_wiki$O $(OBJDIR)\leaf$O $(OBJDIR)\loadctrl$O $(OBJDIR)\login$O $(OBJDIR)\lookslike$O $(OBJDIR)\main$O $(OBJDIR)\manifest$O $(OBJDIR)\markdown$O $(OBJDIR)\markdown_html$O $(OBJDIR)\md5$O $(OBJDIR)\merge$O $(OBJDIR)\merge3$O $(OBJDIR)\moderate$O $(OBJDIR)\name$O $(OBJDIR)\path$O $(OBJDIR)\piechart$O $(OBJDIR)\pivot$O $(OBJDIR)\popen$O $(OBJDIR)\pqueue$O $(OBJDIR)\printf$O $(OBJDIR)\publish$O $(OBJDIR)\purge$O $(OBJDIR)\rebuild$O $(OBJDIR)\regexp$O $(OBJDIR)\report$O $(OBJDIR)\rss$O $(OBJDIR)\schema$O $(OBJDIR)\search$O $(OBJDIR)\setup$O $(OBJDIR)\sha1$O $(OBJDIR)\shun$O $(OBJDIR)\sitemap$O $(OBJDIR)\skins$O $(OBJDIR)\sqlcmd$O $(OBJDIR)\stash$O $(OBJDIR)\stat$O $(OBJDIR)\statrep$O $(OBJDIR)\style$O $(OBJDIR)\sync$O $(OBJDIR)\tag$O $(OBJDIR)\tar$O $(OBJDIR)\th_main$O $(OBJDIR)\timeline$O $(OBJDIR)\tkt$O $(OBJDIR)\tktsetup$O $(OBJDIR)\undo$O $(OBJDIR)\unicode$O $(OBJDIR)\update$O $(OBJDIR)\url$O $(OBJDIR)\user$O $(OBJDIR)\utf8$O $(OBJDIR)\util$O $(OBJDIR)\verify$O $(OBJDIR)\vfile$O $(OBJDIR)\wiki$O $(OBJDIR)\wikiformat$O $(OBJDIR)\winfile$O $(OBJDIR)\winhttp$O $(OBJDIR)\wysiwyg$O $(OBJDIR)\xfer$O $(OBJDIR)\xfersetup$O $(OBJDIR)\zip$O $(OBJDIR)\shell$O $(OBJDIR)\sqlite3$O $(OBJDIR)\th$O $(OBJDIR)\th_lang$O 

Changes to win/Makefile.mingw.
(old: lines 1-20)
#!/usr/bin/make
#
##############################################################################
# WARNING: DO NOT EDIT, AUTOMATICALLY GENERATED FILE (SEE "src/makemake.tcl")
##############################################################################
#
# This file is automatically generated.  Instead of editing this
# file, edit "makemake.tcl" then run "tclsh makemake.tcl"
# to regenerate this file.
#
# This is a makefile for use on Cygwin/Darwin/FreeBSD/Linux/Windows using
# MinGW or MinGW-w64.
#





#### Select one of MinGW, MinGW-w64 (32-bit) or MinGW-w64 (64-bit) compilers.
#    By default, this is an empty string (i.e. use the native compiler).
#
PREFIX =
# PREFIX = mingw32-
# PREFIX = i686-pc-mingw32-












(new: lines 1-24)
#!/usr/bin/make
#
##############################################################################
# WARNING: DO NOT EDIT, AUTOMATICALLY GENERATED FILE (SEE "src/makemake.tcl")
##############################################################################
#
# This file is automatically generated.  Instead of editing this
# file, edit "makemake.tcl" then run "tclsh makemake.tcl"
# to regenerate this file.
#
# This is a makefile for use on Cygwin/Darwin/FreeBSD/Linux/Windows using
# MinGW or MinGW-w64.
#
# Some of the special options which can be passed to make
#   USE_WINDOWS=1    if building under a windows command prompt
#   X64=1            if using an unprefixed 64-bit mingw compiler
#

#### Select one of MinGW, MinGW-w64 (32-bit) or MinGW-w64 (64-bit) compilers.
#    By default, this is an empty string (i.e. use the native compiler).
#
PREFIX =
# PREFIX = mingw32-
# PREFIX = i686-pc-mingw32-
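
A minimal usage sketch (not taken from this check-in): the two special options
documented above are ordinary make variables, so they are simply passed on the
make command line; the binary name mingw32-make for a native Windows command
prompt is an assumption that depends on the local MinGW installation.

    # MSYS or Cygwin shell, unprefixed 64-bit MinGW-w64 compiler
    make -f win/Makefile.mingw X64=1

    # native Windows command prompt
    mingw32-make -f win/Makefile.mingw USE_WINDOWS=1 X64=1
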
(old: lines 50-63)
#
# FOSSIL_ENABLE_SSL = 1

#### Automatically build OpenSSL when building Fossil (causes rebuild
#    issues when building incrementally).
#
# FOSSIL_BUILD_SSL = 1





#### Enable legacy treatment of mv/rm (skip checkout files)
#
# FOSSIL_ENABLE_LEGACY_MV_RM = 1

#### Enable TH1 scripts in embedded documentation files
#






(new: lines 54-71)
#
# FOSSIL_ENABLE_SSL = 1

#### Automatically build OpenSSL when building Fossil (causes rebuild
#    issues when building incrementally).
#
# FOSSIL_BUILD_SSL = 1

#### Enable relative paths in external diff/gdiff
#
# FOSSIL_ENABLE_EXEC_REL_PATHS = 1

#### Enable legacy treatment of mv/rm (skip checkout files)
#
# FOSSIL_ENABLE_LEGACY_MV_RM = 1

#### Enable TH1 scripts in embedded documentation files
#
(old: lines 148-162)
endif

#### The directories where the OpenSSL include and library files are located.
#    The recommended usage here is to use the Sysinternals junction tool
#    to create a hard link between an "openssl-1.x" sub-directory of the
#    Fossil source code directory and the target OpenSSL source directory.
#
OPENSSLDIR = $(SRCDIR)/../compat/openssl-1.0.2d
OPENSSLINCDIR = $(OPENSSLDIR)/include
OPENSSLLIBDIR = $(OPENSSLDIR)

#### Either the directory where the Tcl library is installed or the Tcl
#    source code directory resides (depending on the value of the macro
#    FOSSIL_TCL_SOURCE).  If this points to the Tcl install directory,
#    this directory must have "include" and "lib" sub-directories.  If






(new: lines 156-170)
endif

#### The directories where the OpenSSL include and library files are located.
#    The recommended usage here is to use the Sysinternals junction tool
#    to create a hard link between an "openssl-1.x" sub-directory of the
#    Fossil source code directory and the target OpenSSL source directory.
#
OPENSSLDIR = $(SRCDIR)/../compat/openssl-1.0.2g
OPENSSLINCDIR = $(OPENSSLDIR)/include
OPENSSLLIBDIR = $(OPENSSLDIR)

#### Either the directory where the Tcl library is installed or the Tcl
#    source code directory resides (depending on the value of the macro
#    FOSSIL_TCL_SOURCE).  If this points to the Tcl install directory,
#    this directory must have "include" and "lib" sub-directories.  If
(old: lines 195-221)
#### C Compile and options for use in building executables that
#    will run on the target platform.  This is usually the same
#    as BCC, unless you are cross-compiling.  This C compiler builds
#    the finished binary for fossil.  The BCC compiler above is used
#    for building intermediate code-generator tools.
#
TCC = $(PREFIX)gcc -Os -Wall

#### When not using the miniz compression library, zlib is required.
#
ifndef FOSSIL_ENABLE_MINIZ
TCC += -L$(ZLIBDIR) -I$(ZINCDIR)
endif

#### Add the necessary command line options to build with debugging
#    symbols, if enabled.
#
ifdef FOSSIL_ENABLE_SYMBOLS
TCC += -g








endif

#### Compile resources for use in building executables that will run
#    on the target platform.
#
RCC = $(PREFIX)windres -I$(SRCDIR)







(new: lines 203-231)
#### C Compile and options for use in building executables that
#    will run on the target platform.  This is usually the same
#    as BCC, unless you are cross-compiling.  This C compiler builds
#    the finished binary for fossil.  The BCC compiler above is used
#    for building intermediate code-generator tools.
#
TCC = $(PREFIX)gcc -Wall







#### Add the necessary command line options to build with debugging
#    symbols, if enabled.
#
ifdef FOSSIL_ENABLE_SYMBOLS
TCC += -g
else
TCC += -Os
endif

#### When not using the miniz compression library, zlib is required.
#
ifndef FOSSIL_ENABLE_MINIZ
TCC += -L$(ZLIBDIR) -I$(ZINCDIR)
endif

#### Compile resources for use in building executables that will run
#    on the target platform.
#
RCC = $(PREFIX)windres -I$(SRCDIR)

(old: lines 253-266)
endif

# With HTTPS support
ifdef FOSSIL_ENABLE_SSL
TCC += -DFOSSIL_ENABLE_SSL=1
RCC += -DFOSSIL_ENABLE_SSL=1
endif







# With legacy treatment of mv/rm
ifdef FOSSIL_ENABLE_LEGACY_MV_RM
TCC += -DFOSSIL_ENABLE_LEGACY_MV_RM=1
RCC += -DFOSSIL_ENABLE_LEGACY_MV_RM=1
endif







(new: lines 263-282)
endif

# With HTTPS support
ifdef FOSSIL_ENABLE_SSL
TCC += -DFOSSIL_ENABLE_SSL=1
RCC += -DFOSSIL_ENABLE_SSL=1
endif

# With relative paths in external diff/gdiff
ifdef FOSSIL_ENABLE_EXEC_REL_PATHS
TCC += -DFOSSIL_ENABLE_EXEC_REL_PATHS=1
RCC += -DFOSSIL_ENABLE_EXEC_REL_PATHS=1
endif

# With legacy treatment of mv/rm
ifdef FOSSIL_ENABLE_LEGACY_MV_RM
TCC += -DFOSSIL_ENABLE_LEGACY_MV_RM=1
RCC += -DFOSSIL_ENABLE_LEGACY_MV_RM=1
endif
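
A minimal usage sketch (not taken from this check-in): the makefile only tests
these symbols with ifdef, so defining them on the make command line is enough
to switch the corresponding features on.

    # build with relative external diff paths and legacy mv/rm handling
    make -f win/Makefile.mingw FOSSIL_ENABLE_EXEC_REL_PATHS=1 FOSSIL_ENABLE_LEGACY_MV_RM=1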

(old: lines 954-968)
ifdef FOSSIL_BUILD_SSL
APPTARGETS += openssl
endif

$(APPNAME):	$(APPTARGETS) $(OBJDIR)/headers $(CODECHECK1) $(OBJ) $(EXTRAOBJ) $(OBJDIR)/fossil.o
	$(CODECHECK1) $(TRANS_SRC)
	$(TCC) -o $@ $(OBJ) $(EXTRAOBJ) $(LIB) $(OBJDIR)/fossil.o

# This rule prevents make from using its default rules to try build
# an executable named "manifest" out of the file named "manifest.c"
#
$(SRCDIR)/../manifest:
	# noop







(new: lines 970-984)
ifdef FOSSIL_BUILD_SSL
APPTARGETS += openssl
endif

$(APPNAME):	$(APPTARGETS) $(OBJDIR)/headers $(CODECHECK1) $(OBJ) $(EXTRAOBJ) $(OBJDIR)/fossil.o
	$(CODECHECK1) $(TRANS_SRC)
	$(TCC) -o $@ $(OBJ) $(EXTRAOBJ) $(OBJDIR)/fossil.o $(LIB)

# This rule prevents make from using its default rules to try build
# an executable named "manifest" out of the file named "manifest.c"
#
$(SRCDIR)/../manifest:
	# noop

(old: lines 2074-2087)
                 -DSQLITE_THREADSAFE=0 \
                 -DSQLITE_DEFAULT_FILE_FORMAT=4 \
                 -DSQLITE_OMIT_DEPRECATED \
                 -DSQLITE_ENABLE_EXPLAIN_COMMENTS \
                 -DSQLITE_ENABLE_FTS4 \
                 -DSQLITE_ENABLE_FTS3_PARENTHESIS \
                 -DSQLITE_ENABLE_DBSTAT_VTAB \


                 -DSQLITE_WIN32_NO_ANSI \
                 -D_HAVE__MINGW_H \
                 -DSQLITE_USE_MALLOC_H \
                 -DSQLITE_USE_MSIZE

SHELL_OPTIONS = -Dmain=sqlite3_shell \
                -DSQLITE_OMIT_LOAD_EXTENSION=1 \






(new: lines 2090-2105)
                 -DSQLITE_THREADSAFE=0 \
                 -DSQLITE_DEFAULT_FILE_FORMAT=4 \
                 -DSQLITE_OMIT_DEPRECATED \
                 -DSQLITE_ENABLE_EXPLAIN_COMMENTS \
                 -DSQLITE_ENABLE_FTS4 \
                 -DSQLITE_ENABLE_FTS3_PARENTHESIS \
                 -DSQLITE_ENABLE_DBSTAT_VTAB \
                 -DSQLITE_ENABLE_JSON1 \
                 -DSQLITE_ENABLE_FTS5 \
                 -DSQLITE_WIN32_NO_ANSI \
                 -D_HAVE__MINGW_H \
                 -DSQLITE_USE_MALLOC_H \
                 -DSQLITE_USE_MSIZE

SHELL_OPTIONS = -Dmain=sqlite3_shell \
                -DSQLITE_OMIT_LOAD_EXTENSION=1 \
Changes to win/Makefile.mingw.mistachkin.
(old: lines 1-20)
#!/usr/bin/make
#
##############################################################################
# WARNING: DO NOT EDIT, AUTOMATICALLY GENERATED FILE (SEE "src/makemake.tcl")
##############################################################################
#
# This file is automatically generated.  Instead of editing this
# file, edit "makemake.tcl" then run "tclsh makemake.tcl"
# to regenerate this file.
#
# This is a makefile for use on Cygwin/Darwin/FreeBSD/Linux/Windows using
# MinGW or MinGW-w64.
#





#### Select one of MinGW, MinGW-w64 (32-bit) or MinGW-w64 (64-bit) compilers.
#    By default, this is an empty string (i.e. use the native compiler).
#
PREFIX =
# PREFIX = mingw32-
# PREFIX = i686-pc-mingw32-












(new: lines 1-24)
#!/usr/bin/make
#
##############################################################################
# WARNING: DO NOT EDIT, AUTOMATICALLY GENERATED FILE (SEE "src/makemake.tcl")
##############################################################################
#
# This file is automatically generated.  Instead of editing this
# file, edit "makemake.tcl" then run "tclsh makemake.tcl"
# to regenerate this file.
#
# This is a makefile for use on Cygwin/Darwin/FreeBSD/Linux/Windows using
# MinGW or MinGW-w64.
#
# Some of the special options which can be passed to make
#   USE_WINDOWS=1    if building under a windows command prompt
#   X64=1            if using an unprefixed 64-bit mingw compiler
#

#### Select one of MinGW, MinGW-w64 (32-bit) or MinGW-w64 (64-bit) compilers.
#    By default, this is an empty string (i.e. use the native compiler).
#
PREFIX =
# PREFIX = mingw32-
# PREFIX = i686-pc-mingw32-
(old: lines 50-63)
#
FOSSIL_ENABLE_SSL = 1

#### Automatically build OpenSSL when building Fossil (causes rebuild
#    issues when building incrementally).
#
# FOSSIL_BUILD_SSL = 1





#### Enable legacy treatment of mv/rm (skip checkout files)
#
FOSSIL_ENABLE_LEGACY_MV_RM = 1

#### Enable TH1 scripts in embedded documentation files
#






(new: lines 54-71)
#
FOSSIL_ENABLE_SSL = 1

#### Automatically build OpenSSL when building Fossil (causes rebuild
#    issues when building incrementally).
#
# FOSSIL_BUILD_SSL = 1

#### Enable relative paths in external diff/gdiff
#
# FOSSIL_ENABLE_EXEC_REL_PATHS = 1

#### Enable legacy treatment of mv/rm (skip checkout files)
#
FOSSIL_ENABLE_LEGACY_MV_RM = 1

#### Enable TH1 scripts in embedded documentation files
#
(old: lines 148-162)
endif

#### The directories where the OpenSSL include and library files are located.
#    The recommended usage here is to use the Sysinternals junction tool
#    to create a hard link between an "openssl-1.x" sub-directory of the
#    Fossil source code directory and the target OpenSSL source directory.
#
OPENSSLDIR = $(SRCDIR)/../compat/openssl-1.0.2d
OPENSSLINCDIR = $(OPENSSLDIR)/include
OPENSSLLIBDIR = $(OPENSSLDIR)

#### Either the directory where the Tcl library is installed or the Tcl
#    source code directory resides (depending on the value of the macro
#    FOSSIL_TCL_SOURCE).  If this points to the Tcl install directory,
#    this directory must have "include" and "lib" sub-directories.  If






(new: lines 156-170)
endif

#### The directories where the OpenSSL include and library files are located.
#    The recommended usage here is to use the Sysinternals junction tool
#    to create a hard link between an "openssl-1.x" sub-directory of the
#    Fossil source code directory and the target OpenSSL source directory.
#
OPENSSLDIR = $(SRCDIR)/../compat/openssl-1.0.2g
OPENSSLINCDIR = $(OPENSSLDIR)/include
OPENSSLLIBDIR = $(OPENSSLDIR)

#### Either the directory where the Tcl library is installed or the Tcl
#    source code directory resides (depending on the value of the macro
#    FOSSIL_TCL_SOURCE).  If this points to the Tcl install directory,
#    this directory must have "include" and "lib" sub-directories.  If
(old: lines 195-221)
#### C Compile and options for use in building executables that
#    will run on the target platform.  This is usually the same
#    as BCC, unless you are cross-compiling.  This C compiler builds
#    the finished binary for fossil.  The BCC compiler above is used
#    for building intermediate code-generator tools.
#
TCC = $(PREFIX)gcc -Os -Wall

#### When not using the miniz compression library, zlib is required.
#
ifndef FOSSIL_ENABLE_MINIZ
TCC += -L$(ZLIBDIR) -I$(ZINCDIR)
endif

#### Add the necessary command line options to build with debugging
#    symbols, if enabled.
#
ifdef FOSSIL_ENABLE_SYMBOLS
TCC += -g








endif

#### Compile resources for use in building executables that will run
#    on the target platform.
#
RCC = $(PREFIX)windres -I$(SRCDIR)







(new: lines 203-231)
#### C Compile and options for use in building executables that
#    will run on the target platform.  This is usually the same
#    as BCC, unless you are cross-compiling.  This C compiler builds
#    the finished binary for fossil.  The BCC compiler above is used
#    for building intermediate code-generator tools.
#
TCC = $(PREFIX)gcc -Wall







#### Add the necessary command line options to build with debugging
#    symbols, if enabled.
#
ifdef FOSSIL_ENABLE_SYMBOLS
TCC += -g
else
TCC += -Os
endif

#### When not using the miniz compression library, zlib is required.
#
ifndef FOSSIL_ENABLE_MINIZ
TCC += -L$(ZLIBDIR) -I$(ZINCDIR)
endif

#### Compile resources for use in building executables that will run
#    on the target platform.
#
RCC = $(PREFIX)windres -I$(SRCDIR)

(old: lines 253-266)
endif

# With HTTPS support
ifdef FOSSIL_ENABLE_SSL
TCC += -DFOSSIL_ENABLE_SSL=1
RCC += -DFOSSIL_ENABLE_SSL=1
endif







# With legacy treatment of mv/rm
ifdef FOSSIL_ENABLE_LEGACY_MV_RM
TCC += -DFOSSIL_ENABLE_LEGACY_MV_RM=1
RCC += -DFOSSIL_ENABLE_LEGACY_MV_RM=1
endif







(new: lines 263-282)
endif

# With HTTPS support
ifdef FOSSIL_ENABLE_SSL
TCC += -DFOSSIL_ENABLE_SSL=1
RCC += -DFOSSIL_ENABLE_SSL=1
endif

# With relative paths in external diff/gdiff
ifdef FOSSIL_ENABLE_EXEC_REL_PATHS
TCC += -DFOSSIL_ENABLE_EXEC_REL_PATHS=1
RCC += -DFOSSIL_ENABLE_EXEC_REL_PATHS=1
endif

# With legacy treatment of mv/rm
ifdef FOSSIL_ENABLE_LEGACY_MV_RM
TCC += -DFOSSIL_ENABLE_LEGACY_MV_RM=1
RCC += -DFOSSIL_ENABLE_LEGACY_MV_RM=1
endif

(old: lines 954-968)
ifdef FOSSIL_BUILD_SSL
APPTARGETS += openssl
endif

$(APPNAME):	$(APPTARGETS) $(OBJDIR)/headers $(CODECHECK1) $(OBJ) $(EXTRAOBJ) $(OBJDIR)/fossil.o
	$(CODECHECK1) $(TRANS_SRC)
	$(TCC) -o $@ $(OBJ) $(EXTRAOBJ) $(LIB) $(OBJDIR)/fossil.o

# This rule prevents make from using its default rules to try build
# an executable named "manifest" out of the file named "manifest.c"
#
$(SRCDIR)/../manifest:
	# noop







(new: lines 970-984)
ifdef FOSSIL_BUILD_SSL
APPTARGETS += openssl
endif

$(APPNAME):	$(APPTARGETS) $(OBJDIR)/headers $(CODECHECK1) $(OBJ) $(EXTRAOBJ) $(OBJDIR)/fossil.o
	$(CODECHECK1) $(TRANS_SRC)
	$(TCC) -o $@ $(OBJ) $(EXTRAOBJ) $(OBJDIR)/fossil.o $(LIB)

# This rule prevents make from using its default rules to try build
# an executable named "manifest" out of the file named "manifest.c"
#
$(SRCDIR)/../manifest:
	# noop

(old: lines 2074-2087)
                 -DSQLITE_THREADSAFE=0 \
                 -DSQLITE_DEFAULT_FILE_FORMAT=4 \
                 -DSQLITE_OMIT_DEPRECATED \
                 -DSQLITE_ENABLE_EXPLAIN_COMMENTS \
                 -DSQLITE_ENABLE_FTS4 \
                 -DSQLITE_ENABLE_FTS3_PARENTHESIS \
                 -DSQLITE_ENABLE_DBSTAT_VTAB \


                 -DSQLITE_WIN32_NO_ANSI \
                 -D_HAVE__MINGW_H \
                 -DSQLITE_USE_MALLOC_H \
                 -DSQLITE_USE_MSIZE

SHELL_OPTIONS = -Dmain=sqlite3_shell \
                -DSQLITE_OMIT_LOAD_EXTENSION=1 \






(new: lines 2090-2105)
                 -DSQLITE_THREADSAFE=0 \
                 -DSQLITE_DEFAULT_FILE_FORMAT=4 \
                 -DSQLITE_OMIT_DEPRECATED \
                 -DSQLITE_ENABLE_EXPLAIN_COMMENTS \
                 -DSQLITE_ENABLE_FTS4 \
                 -DSQLITE_ENABLE_FTS3_PARENTHESIS \
                 -DSQLITE_ENABLE_DBSTAT_VTAB \
                 -DSQLITE_ENABLE_JSON1 \
                 -DSQLITE_ENABLE_FTS5 \
                 -DSQLITE_WIN32_NO_ANSI \
                 -D_HAVE__MINGW_H \
                 -DSQLITE_USE_MALLOC_H \
                 -DSQLITE_USE_MSIZE

SHELL_OPTIONS = -Dmain=sqlite3_shell \
                -DSQLITE_OMIT_LOAD_EXTENSION=1 \
Changes to win/Makefile.msc.
(old: lines 44-57)
FOSSIL_BUILD_ZLIB = 1
!endif

# Link everything except SQLite dynamically?
!ifndef FOSSIL_DYNAMIC_BUILD
FOSSIL_DYNAMIC_BUILD = 0
!endif






# Enable the JSON API?
!ifndef FOSSIL_ENABLE_JSON
FOSSIL_ENABLE_JSON = 0
!endif

# Enable legacy treatment of the mv/rm commands?






(new: lines 44-62)
FOSSIL_BUILD_ZLIB = 1
!endif

# Link everything except SQLite dynamically?
!ifndef FOSSIL_DYNAMIC_BUILD
FOSSIL_DYNAMIC_BUILD = 0
!endif

# Enable relative paths in external diff/gdiff?
!ifndef FOSSIL_ENABLE_EXEC_REL_PATHS
FOSSIL_ENABLE_EXEC_REL_PATHS = 0
!endif

# Enable the JSON API?
!ifndef FOSSIL_ENABLE_JSON
FOSSIL_ENABLE_JSON = 0
!endif

# Enable legacy treatment of the mv/rm commands?
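
A minimal usage sketch (not taken from this check-in): the MSVC makefile gives
the new option a default of 0, so it is enabled by overriding the variable on
the nmake command line in the same style as the other options.

    nmake /f Makefile.msc FOSSIL_ENABLE_EXEC_REL_PATHS=1
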
(old: lines 86-100)
# Enable support for Windows XP with Visual Studio 201x?
!ifndef FOSSIL_ENABLE_WINXP
FOSSIL_ENABLE_WINXP = 0
!endif

!if $(FOSSIL_ENABLE_SSL)!=0
SSLDIR    = $(B)\compat\openssl-1.0.2d
SSLINCDIR = $(SSLDIR)\inc32
!if $(FOSSIL_DYNAMIC_BUILD)!=0
SSLLIBDIR = $(SSLDIR)\out32dll
!else
SSLLIBDIR = $(SSLDIR)\out32
!endif
SSLLFLAGS = /nologo /opt:ref /debug






(new: lines 91-105)
# Enable support for Windows XP with Visual Studio 201x?
!ifndef FOSSIL_ENABLE_WINXP
FOSSIL_ENABLE_WINXP = 0
!endif

!if $(FOSSIL_ENABLE_SSL)!=0
SSLDIR    = $(B)\compat\openssl-1.0.2g
SSLINCDIR = $(SSLDIR)\inc32
!if $(FOSSIL_DYNAMIC_BUILD)!=0
SSLLIBDIR = $(SSLDIR)\out32dll
!else
SSLLIBDIR = $(SSLDIR)\out32
!endif
SSLLFLAGS = /nologo /opt:ref /debug
(old: lines 262-275)
!if $(FOSSIL_ENABLE_SSL)!=0
TCC       = $(TCC) /DFOSSIL_ENABLE_SSL=1
RCC       = $(RCC) /DFOSSIL_ENABLE_SSL=1
LIBS      = $(LIBS) $(SSLLIB)
LIBDIR    = $(LIBDIR) /LIBPATH:$(SSLLIBDIR)
!endif






!if $(FOSSIL_ENABLE_LEGACY_MV_RM)!=0
TCC       = $(TCC) /DFOSSIL_ENABLE_LEGACY_MV_RM=1
RCC       = $(RCC) /DFOSSIL_ENABLE_LEGACY_MV_RM=1
!endif

!if $(FOSSIL_ENABLE_TH1_DOCS)!=0






(new: lines 267-285)
!if $(FOSSIL_ENABLE_SSL)!=0
TCC       = $(TCC) /DFOSSIL_ENABLE_SSL=1
RCC       = $(RCC) /DFOSSIL_ENABLE_SSL=1
LIBS      = $(LIBS) $(SSLLIB)
LIBDIR    = $(LIBDIR) /LIBPATH:$(SSLLIBDIR)
!endif

!if $(FOSSIL_ENABLE_EXEC_REL_PATHS)!=0
TCC       = $(TCC) /DFOSSIL_ENABLE_EXEC_REL_PATHS=1
RCC       = $(RCC) /DFOSSIL_ENABLE_EXEC_REL_PATHS=1
!endif

!if $(FOSSIL_ENABLE_LEGACY_MV_RM)!=0
TCC       = $(TCC) /DFOSSIL_ENABLE_LEGACY_MV_RM=1
RCC       = $(RCC) /DFOSSIL_ENABLE_LEGACY_MV_RM=1
!endif

!if $(FOSSIL_ENABLE_TH1_DOCS)!=0
(old: lines 299-312)
                 /DSQLITE_THREADSAFE=0 \
                 /DSQLITE_DEFAULT_FILE_FORMAT=4 \
                 /DSQLITE_OMIT_DEPRECATED \
                 /DSQLITE_ENABLE_EXPLAIN_COMMENTS \
                 /DSQLITE_ENABLE_FTS4 \
                 /DSQLITE_ENABLE_FTS3_PARENTHESIS \
                 /DSQLITE_ENABLE_DBSTAT_VTAB \


                 /DSQLITE_WIN32_NO_ANSI

SHELL_OPTIONS = /Dmain=sqlite3_shell \
                /DSQLITE_OMIT_LOAD_EXTENSION=1 \
                /DUSE_SYSTEM_SQLITE=$(USE_SYSTEM_SQLITE) \
                /DSQLITE_SHELL_DBNAME_PROC=fossil_open \
                /Daccess=file_access \






(new: lines 309-324)
                 /DSQLITE_THREADSAFE=0 \
                 /DSQLITE_DEFAULT_FILE_FORMAT=4 \
                 /DSQLITE_OMIT_DEPRECATED \
                 /DSQLITE_ENABLE_EXPLAIN_COMMENTS \
                 /DSQLITE_ENABLE_FTS4 \
                 /DSQLITE_ENABLE_FTS3_PARENTHESIS \
                 /DSQLITE_ENABLE_DBSTAT_VTAB \
                 /DSQLITE_ENABLE_JSON1 \
                 /DSQLITE_ENABLE_FTS5 \
                 /DSQLITE_WIN32_NO_ANSI

SHELL_OPTIONS = /Dmain=sqlite3_shell \
                /DSQLITE_OMIT_LOAD_EXTENSION=1 \
                /DUSE_SYSTEM_SQLITE=$(USE_SYSTEM_SQLITE) \
                /DSQLITE_SHELL_DBNAME_PROC=fossil_open \
                /Daccess=file_access \
Changes to win/buildmsvc.bat.
(old: lines 257-274)
    SET LIB=%PFILES_SDK71A%\Microsoft SDKs\Windows\7.1A\Lib;%LIB%
  )
  CALL :fn_UnsetVariable PFILES_SDK71A
  SET NMAKE_ARGS=%NMAKE_ARGS% FOSSIL_ENABLE_WINXP=1
  GOTO :EOF

:fn_UnsetVariable


  IF NOT "%1" == "" (


    SET %1=

    CALL :fn_ResetErrorLevel
  )

  GOTO :EOF

:fn_ResetErrorLevel
  VERIFY > NUL
  GOTO :EOF

:fn_SetErrorLevel






(new: lines 257-280)
    SET LIB=%PFILES_SDK71A%\Microsoft SDKs\Windows\7.1A\Lib;%LIB%
  )
  CALL :fn_UnsetVariable PFILES_SDK71A
  SET NMAKE_ARGS=%NMAKE_ARGS% FOSSIL_ENABLE_WINXP=1
  GOTO :EOF

:fn_UnsetVariable
  SETLOCAL
  SET VALUE=%1
  IF DEFINED VALUE (
    SET VALUE=
    ENDLOCAL
    SET %VALUE%=
  ) ELSE (
    ENDLOCAL
  )
  CALL :fn_ResetErrorLevel
  GOTO :EOF

:fn_ResetErrorLevel
  VERIFY > NUL
  GOTO :EOF

:fn_SetErrorLevel
Changes to win/fossil.rc.
(old: lines 123-136)
      VALUE "SslEnabled", "Yes, " OPENSSL_VERSION_TEXT "\0"
#endif /* defined(FOSSIL_ENABLE_SSL) */
#if defined(FOSSIL_ENABLE_LEGACY_MV_RM)
      VALUE "LegacyMvRm", "Yes\0"
#else
      VALUE "LegacyMvRm", "No\0"
#endif /* defined(FOSSIL_ENABLE_LEGACY_MV_RM) */





#if defined(FOSSIL_ENABLE_TH1_DOCS)
      VALUE "Th1Docs", "Yes\0"
#else
      VALUE "Th1Docs", "No\0"
#endif /* defined(FOSSIL_ENABLE_TH1_DOCS) */
#if defined(FOSSIL_ENABLE_TH1_HOOKS)
      VALUE "Th1Hooks", "Yes\0"






(new: lines 123-141)
      VALUE "SslEnabled", "Yes, " OPENSSL_VERSION_TEXT "\0"
#endif /* defined(FOSSIL_ENABLE_SSL) */
#if defined(FOSSIL_ENABLE_LEGACY_MV_RM)
      VALUE "LegacyMvRm", "Yes\0"
#else
      VALUE "LegacyMvRm", "No\0"
#endif /* defined(FOSSIL_ENABLE_LEGACY_MV_RM) */
#if defined(FOSSIL_ENABLE_EXEC_REL_PATHS)
      VALUE "ExecRelPaths", "Yes\0"
#else
      VALUE "ExecRelPaths", "No\0"
#endif /* defined(FOSSIL_ENABLE_EXEC_REL_PATHS) */
#if defined(FOSSIL_ENABLE_TH1_DOCS)
      VALUE "Th1Docs", "Yes\0"
#else
      VALUE "Th1Docs", "No\0"
#endif /* defined(FOSSIL_ENABLE_TH1_DOCS) */
#if defined(FOSSIL_ENABLE_TH1_HOOKS)
      VALUE "Th1Hooks", "Yes\0"
Added www/blame.wiki.
(added: lines 1-62)
<title>The Annotate Algorithm</title>

<h2>1.0 Introduction</h2>

The [/help?cmd=annotate|fossil annotate],
[/help?cmd=blame|fossil blame], and
[/help?cmd=praise|fossil praise] commands, and the
[/help?cmd=/annotate|/annotate],
[/help?cmd=/blame|/blame], and
[/help?cmd=/praise|/praise] web pages are all used to show the most
recent check-in that modified each line of a particular file.
This article overviews the algorithm used to compute the annotation
for a file in Fossil.

<h2>2.0 Algorithm</h2>

<ol type='1'>
<li>Locate the check-in that contains the file that is to be
    annotated.  Call this check-in C0.
<li>Find all direct ancestors of C0.  A direct ancestor is the closure
    of the primary parent of C0.  Merged-in branches are not part of
    the direct ancestors of C0.
<li>Prune the list of ancestors of C0 so that it contains only
    check-ins in which the file to be annotated was modified.
<li>Load the complete text of the file to be annotated from check-in C0.
    Call this version of the file F0.
<li>Parse F0 into lines.  Mark each line as "unchanged".
<li>For each ancestor of C0 on the pruned list (call the ancestor CX), 
    beginning with the most
    recent ancestor and moving toward the oldest ancestor, do the
    following steps:
<ol type='a'>
<li>Load the text for the file to be annotated as it existed in check-in CX.
    Call this text FX.
<li>Compute a diff going from FX to F0.
<li>For each line of F0 that is changed in the diff and which was previously
    marked "unchanged", update the mark to indicate that the line
    was modified by CX.
</ol>
<li>Show each line of F0 together with its change mark, appropriately
    formatted.
</ol>

<h2>3.0 Discussion and Notes</h2>

The time-consuming part of this algorithm is step 6b - computing the
diff from all historical versions of the file to the version of the file
under analysis.  For a large file that has many historical changes, this
can take several seconds.  For this reason, the default 
[/help?cmd=/annotate|/annotate] webpage only shows those lines that were
changed by the 20 most recent modifications to the file.  This allows
the loop on step 6 to terminate after only 19 diffs instead of the hundreds
or thousands of diffs that might be required for a frequently modified file.

As currently implemented (as of 2015-12-12) the annotate algorithm does not
follow files across name changes.  File name change information is 
available in the database, and so the algorithm could be enhanced to follow
files across name changes by modifications to step 3.

Step 2 is interesting in that it is
[/artifact/6cb824a0417?ln=196-201 | implemented] using a
[https://www.sqlite.org/lang_with.html#recursivecte|recursive common table expression].
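
A minimal usage sketch (not part of the committed document): the three commands
described above run the same algorithm and differ mainly in how each line is
labeled; the file name used here is hypothetical.

    fossil annotate src/diff.c    # label each line with check-in date and ID
    fossil blame src/diff.c       # label each line with the committing user
    fossil praise src/diff.c      # "praise" is an alias for "blame"
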
Changes to www/build.wiki.
(old: lines 109-147)
all Unix and Unix-like systems.  Simply type "<b>make</b>".

<li><p><i>Unix without running "configure"</i> → if you prefer to avoid
running configure, you can also use: <b>make -f Makefile.classic</b>.  You may
want to make minor edits to Makefile.classic to configure the build for your
system.

<li><p><i>MinGW3.x (not 4.0)/MinGW-w64</i> → Use the mingw makefile:
"<b>make -f win/Makefile.mingw</b>". On a Windows box you will
need either Cygwin or Msys as build environment. On Cygwin, Linux
or Darwin you may want to make minor edits to win/Makefile.mingw
to configure the cross-compile environment.

To enable the native [./th1.md#tclEval | Tcl integration feature], use a
command line like the following (all on one line):

<b>make -f win/Makefile.mingw FOSSIL_ENABLE_TCL=1 FOSSIL_ENABLE_TCL_STUBS=1 FOSSIL_ENABLE_TCL_PRIVATE_STUBS=1</b>







Hint: don't use MinGW-4.0, it will compile but fossil won't work correctly, see
<a href="https://www.fossil-scm.org/index.html/tktview/18cff45a4e210430e24c">https://www.fossil-scm.org/index.html/tktview/18cff45a4e210430e24c</a>.

<li><p><i>MSVC</i> → Use the MSVC makefile.  First
change to the "win/" subdirectory ("<b>cd win</b>") then run
"<b>nmake /f Makefile.msc</b>".<br><br>Alternatively, the batch
file "<b>win\buildmsvc.bat</b>" may be used and it will attempt to
detect and use the latest installed version of MSVC.<br><br>To enable
the optional <a href="https://www.openssl.org/">OpenSSL</a> support,
first <a href="https://www.openssl.org/source/">download the official
source code for OpenSSL</a> and extract it to an appropriately named
"<b>openssl-X.Y.ZA</b>" subdirectory within the local
[/tree?ci=trunk&name=compat | compat] directory (e.g.
"<b>compat/openssl-1.0.2d</b>"), then make sure that some recent
<a href="http://www.perl.org/">Perl</a> binaries are installed locally,
and finally run one of the following commands:
<blockquote><pre>
nmake /f Makefile.msc FOSSIL_ENABLE_SSL=1 FOSSIL_BUILD_SSL=1 PERLDIR=C:\full\path\to\Perl\bin
</pre></blockquote>
<blockquote><pre>
buildmsvc.bat FOSSIL_ENABLE_SSL=1 FOSSIL_BUILD_SSL=1 PERLDIR=C:\full\path\to\Perl\bin






(new: lines 109-153)
all Unix and Unix-like systems.  Simply type "<b>make</b>".

<li><p><i>Unix without running "configure"</i> → if you prefer to avoid
running configure, you can also use: <b>make -f Makefile.classic</b>.  You may
want to make minor edits to Makefile.classic to configure the build for your
system.

<li><p><i>MinGW 3.x (<u>not</u> 4.x) / MinGW-w64</i> → Use the MinGW makefile:
"<b>make -f win/Makefile.mingw</b>".  On a Windows box you will need either
Cygwin or Msys as build environment. On Cygwin, Linux or Darwin you may want
to make minor edits to win/Makefile.mingw to configure the cross-compile
environment.

To enable the native [./th1.md#tclEval | Tcl integration feature], use a
command line like the following (all on one line):

<b>make -f win/Makefile.mingw FOSSIL_ENABLE_TCL=1 FOSSIL_ENABLE_TCL_STUBS=1 FOSSIL_ENABLE_TCL_PRIVATE_STUBS=1</b>

Alternatively, <b>./configure</b> may now be used to create a Makefile
suitable for use with MinGW; however, options passed to configure that are
not applicable on Windows may cause the configuration or compilation to fail
(e.g. fusefs, internal-sqlite, etc).

<i>HINT</i>: Do <u>not</u> use MinGW-4.x; it may compile, but the Fossil binary
will not work correctly.  See
[https://www.fossil-scm.org/index.html/tktview/18cff45a4e210430e24c | this ticket].

<li><p><i>MSVC</i> → Use the MSVC makefile.  First
change to the "win/" subdirectory ("<b>cd win</b>") then run
"<b>nmake /f Makefile.msc</b>".<br><br>Alternatively, the batch
file "<b>win\buildmsvc.bat</b>" may be used and it will attempt to
detect and use the latest installed version of MSVC.<br><br>To enable
the optional <a href="https://www.openssl.org/">OpenSSL</a> support,
first <a href="https://www.openssl.org/source/">download the official
source code for OpenSSL</a> and extract it to an appropriately named
"<b>openssl-X.Y.ZA</b>" subdirectory within the local
[/tree?ci=trunk&name=compat | compat] directory (e.g.
"<b>compat/openssl-1.0.2g</b>"), then make sure that some recent
<a href="http://www.perl.org/">Perl</a> binaries are installed locally,
and finally run one of the following commands:
<blockquote><pre>
nmake /f Makefile.msc FOSSIL_ENABLE_SSL=1 FOSSIL_BUILD_SSL=1 PERLDIR=C:\full\path\to\Perl\bin
</pre></blockquote>
<blockquote><pre>
buildmsvc.bat FOSSIL_ENABLE_SSL=1 FOSSIL_BUILD_SSL=1 PERLDIR=C:\full\path\to\Perl\bin
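
A minimal usage sketch (not part of the committed document) of the
configure-based MinGW build mentioned above, run from an MSYS shell at the top
of the source tree; which configure options must be avoided depends on the
local toolchain, as cautioned above.

    ./configure
    make
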
Changes to www/changes.wiki.
(old: lines 1-25)
<title>Change Log</title>

<h2>Changes for Version 1.34 (2015-??-??)</h2>






























  *  Fix --hard option to [/help?cmd=mv|fossil mv] and [/help?cmd=rm|fossil rm]

     to enable them to work properly with certain relative paths.
  *  Make the [/help?cmd=clean|fossil clean] command undoable for files less
     than 10MiB.








  *  Add minimal <nowiki>[lsearch]</nowiki> command to TH1. Only exact
     case-sensitive matching is supported.
  *  Add the <nowiki>[glob_match]</nowiki> and <nowiki>[markdown]</nowiki>
     commands to TH1.
  *  Add the <nowiki>[tclIsSafe] and [tclMakeSafe]</nowiki> TH1 commands to
     the Tcl integration subsystem.
  *  Add 'double', 'integer', and 'list' classes to the
     <nowiki>[string is]</nowiki> command in TH1.







  *  Update internal Unicode character tables, used in regular expression




     handling, from version 7.0 to 8.0.


<h2>Changes for Version 1.33 (2015-05-23)</h2>
  *  Improved fork detection on [/help?cmd=update|fossil update],
     [/help?cmd=status|fossil status] and related commands.
  *  Change the default skin to what used to be called "San Francisco Modern".
  *  Add the [/repo-tabsize] web page
  *  Add [/help?cmd=import|fossil import --svn], for importing a subversion

(new: lines 1-75)
<title>Change Log</title>

<h2>Changes for Version 1.35 (2016-00-00)</h2>

  *  Enable symlinks by default on all non-Windows platforms.
  *  Rework the [/help?cmd=/setup_ulist|/setup_list page] (the User List page)
     to display all users
     in a click-to-sort table.
  *  Fix backslash-octal escape on filenames while importing from git
  *  When markdown documents begin with &lt;h1&gt; HTML elements, use that
     header as the document title.
  *  Added the [/help?cmd=/bigbloblist|/bigbloblist page].
  *  Enhance the [/help?cmd=/finfo|/finfo page] so that when it is showing
     the ancestors of a particular file version, it only shows direct
     ancestors and omits changes on branches, thus making it show the same set
     of ancestors that are used for [/help?cmd=/blame|/blame].
  *  Added the --page option to the [/help?cmd=ui|fossil ui] command
  *  Added the [/help?cmd=bisect|fossil bisect ui] command
  *  Enhanced the [/help?cmd=diff|fossil diff] command so that it accepts
     directory names as arguments and computes diffs on all files contained
     within those directories.
  *  Fix the [/help?cmd=add|fossil add] command so that it shows "SKIP" for
     files added that were already under management.
  *  TH1 enhancements:
     <ul><li>Add <nowiki>[array exists]</nowiki> command.</li>
     <li>Add minimal <nowiki>[array names]</nowiki> command.</li>
     <li>Add tcl_platform(engine) and tcl_platform(platform) array
     elements.</li>
     </ul>
  *  Get autosetup working with MinGW.
  *  Fix autosetup detection of zlib in the source tree.
  *  Added autosetup detection of OpenSSL when it may be present under the
     "compat" subdirectory of the source tree.

<h2>Changes for Version 1.34 (2015-11-02)</h2>

  *  Make the [/help?cmd=clean|fossil clean] command undoable for files less
     than 10MiB.
  *  Update internal Unicode character tables, used in regular expression
     handling, from version 7.0 to 8.0.
  *  Add the new [/help?cmd=amend|amend] command which is used to modify
     tags of a "check-in".
  *  Fix bug in [/help?cmd=import|import] command, handling version 3 of
     the svndump format for subversion.
  *  Add the [/help?cmd=all|all cache] command.
  *  TH1 enhancements:
     <ul><li>Add minimal <nowiki>[lsearch]</nowiki> command. Only exact
     case-sensitive matching is supported.</li>
     <li>Add the <nowiki>[glob_match]</nowiki>, <nowiki>[markdown]</nowiki>,
     <nowiki>[dir]</nowiki>, and <nowiki>[encode64]</nowiki> commands.</li>
     <li>Add the <nowiki>[tclIsSafe] and [tclMakeSafe]</nowiki> commands to
     the Tcl integration subsystem.</li>
     <li>Add 'double', 'integer', and 'list' classes to the
     <nowiki>[string is]</nowiki> command.</li>
     </ul>
  *  Add the --undo option to the [/help?cmd=diff|diff] command.
  *  Build-in Antirez's "linenoise" command-line editing library for use with
     the [/help?cmd=sqlite3|fossil sql] command on Unix platforms.
  *  Add [/help?cmd=stash|stash cat] as an alias for the
     [/help?cmd=stash|stash show] command.
  *  Automatically pull before [/help?cmd=merge|fossil merge] when auto-sync
     is enabled.
  *  Fix --hard option to [/help?cmd=mv|fossil mv] and [/help?cmd=rm|fossil rm]
     to enable them to work properly with certain relative paths.
  *  Change the mimetype for ".n" and ".man" files to text/plain.
  *  Display improvements in the [/help?cmd=bisect|fossil bisect chart] command.
  *  Updated the built-in SQLite to version 3.9.1 and activated JSON1 and FTS5
     support (both currently unused within Fossil).

<h2>Changes for Version 1.33 (2015-05-23)</h2>
  *  Improved fork detection on [/help?cmd=update|fossil update],
     [/help?cmd=status|fossil status] and related commands.
  *  Change the default skin to what used to be called "San Francisco Modern".
  *  Add the [/repo-tabsize] web page
  *  Add [/help?cmd=import|fossil import --svn], for importing a subversion
Changes to www/checkin_names.wiki.
(old: lines 13-26)
<li> <b>root :</b> <i>branchname</i>
<li> Special names:
<ul>
<li> <b>tip</b>
<li> <b>current</b>
<li> <b>next</b>
<li> <b>previous</b> or <b>prev</b>

</ul>
</ul>
</td></tr>
</table>
Many Fossil [/help|commands] and [./webui.wiki | web-interface] URLs accept
check-in names as an argument.  For example, the "[/help/info|info]" command
accepts an optional check-in name to identify the specific checkout






(new: lines 13-27)
<li> <b>root :</b> <i>branchname</i>
<li> Special names:
<ul>
<li> <b>tip</b>
<li> <b>current</b>
<li> <b>next</b>
<li> <b>previous</b> or <b>prev</b>
<li> <b>ckout</b> for embedded docs
</ul>
</ul>
</td></tr>
</table>
Many Fossil [/help|commands] and [./webui.wiki | web-interface] URLs accept
check-in names as an argument.  For example, the "[/help/info|info]" command
accepts an optional check-in name to identify the specific checkout
(old: lines 214-227)
equivalent to the timestamp tag "5000-01-01".

If the command is being run from a working check-out (not against a bare
repository) then a few extra tags apply.  The "current" tag means the
current check-out.  The "next" tag means the youngest child of the
current check-out.  And the "previous" or "prev" tag means the primary
(non-merge) parent of the current check-out.







<h2>Additional Examples</h2>

To view the changes in the most recent check-in prior to the version currently
checked out:

<blockquote><pre>






(new: lines 215-234)
equivalent to the timestamp tag "5000-01-01".

If the command is being run from a working check-out (not against a bare
repository) then a few extra tags apply.  The "current" tag means the
current check-out.  The "next" tag means the youngest child of the
current check-out.  And the "previous" or "prev" tag means the primary
(non-merge) parent of the current check-out.

For embedded documentation, the tag "ckout" means the version as present in
the local source tree on disk, provided that the web server is started using
"fossil ui" or "fossil server" from within the source tree. This tag can be 
used to preview local changes to documentation before committing them. It does
not apply to CLI commands.
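
A minimal usage sketch (not part of the committed document), assuming the
embedded documentation lives under a "www/" folder as it does in Fossil's own
tree; the project path is hypothetical.

    cd ~/myproject     # an open check-out
    fossil ui          # serve the repository from within the check-out
    # then browse http://localhost:8080/doc/ckout/www/index.wiki to preview
    # the edited, uncommitted version of that page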

<h2>Additional Examples</h2>

To view the changes in the most recent check-in prior to the version currently
checked out:

<blockquote><pre>
Changes to www/concepts.wiki.
(old: lines 9-57)
development of a software project and to record the history
of the project.
There are many such systems in use today.  Fossil strives to
distinguish itself from the others by being extremely simple
to setup and operate.

This document is intended as a quick introduction to the concepts
behind fossil.

<h2>2.0 Composition Of A Project</h2>
<img src="concept1.gif" align="right" hspace="10">

A software project normally consists of a "source tree".
A source tree is a hierarchy of files that are used to generate
the end product.  The source tree changes over time as the
software grows and expands and as features are added and bugs
are fixed.  A snapshot of the source tree at any point in time
is called a "version" or "revision" or a "baseline" of the product.
In fossil, we use the name "check-in".

A "repository" is a database that contains copies of all historical
check-ins for a project.  Check-ins are normally stored in the
repository in a highly space-efficient compressed format (delta encoding).
But that is an implementation detail that you the user need not worry over.
Think of the repository as a safe place where all your old check-ins are
securely stored away and available for retrieval whenever you need
them.

A repository in fossil is a single file on your disk.  This file
might be rather large (dozens or hundreds of megabytes for a large
or long running project) but it is nevertheless just a file.  You
can move it around, rename it, write it out to a memory stick, or
do anything else you normally do with files.

Each source tree that is controlled by fossil is associated with
a single repository on the local disk drive.  You can tie two or more
source trees to a single repository if you want (though one
tree per repository is the most common configuration.)  So a
single repository can be associated with many source trees, but
each source tree is associated with only one repository.

Fossil source trees may not overlap.  A fossil source tree is identified
by a file named "_FOSSIL_" (or ".fslckout", but this article will always
use the name "_FOSSIL_") in the root directory of the source tree.  Every
file that is a sibling of _FOSSIL_ and every file in every subfolder is
considered potentially a part of the source tree.  The _FOSSIL_ file
contains (among other things) the pathname of the repository with which
the source tree is associated.  On the other hand, the repository has
no record of its source trees.  So you are free to delete a source tree






(new: lines 9-57)
development of a software project and to record the history
of the project.
There are many such systems in use today.  Fossil strives to
distinguish itself from the others by being extremely simple
to setup and operate.

This document is intended as a quick introduction to the concepts
behind Fossil.

<h2>2.0 Composition Of A Project</h2>
<img src="concept1.gif" align="right" hspace="10">

A software project normally consists of a "source tree".
A source tree is a hierarchy of files that are used to generate
the end product.  The source tree changes over time as the
software grows and expands and as features are added and bugs
are fixed.  A snapshot of the source tree at any point in time
is called a "version" or "revision" or a "baseline" of the product.
In Fossil, we use the name "check-in".

A "repository" is a database that contains copies of all historical
check-ins for a project.  Check-ins are normally stored in the
repository in a highly space-efficient compressed format (delta encoding).
But that is an implementation detail that you the user need not worry over.
Think of the repository as a safe place where all your old check-ins are
securely stored away and available for retrieval whenever you need
them.

A repository in Fossil is a single file on your disk.  This file
might be rather large (dozens or hundreds of megabytes for a large
or long running project) but it is nevertheless just a file.  You
can move it around, rename it, write it out to a memory stick, or
do anything else you normally do with files.

Each source tree that is controlled by Fossil is associated with
a single repository on the local disk drive.  You can tie two or more
source trees to a single repository if you want (though one
tree per repository is the most common configuration.)  So a
single repository can be associated with many source trees, but
each source tree is associated with only one repository.

Fossil source trees may not overlap.  A Fossil source tree is identified
by a file named "_FOSSIL_" (or ".fslckout", but this article will always
use the name "_FOSSIL_") in the root directory of the source tree.  Every
file that is a sibling of _FOSSIL_ and every file in every subfolder is
considered potentially a part of the source tree.  The _FOSSIL_ file
contains (among other things) the pathname of the repository with which
the source tree is associated.  On the other hand, the repository has
no record of its source trees.  So you are free to delete a source tree
(old: lines 96-112)
6089f0b563a9db0a6d90682fe47fd7161ff867c8<br>
59712614a1b3ccfd84078a37fa5b606e28434326<br>
19dbf73078be9779edd6a0156195e610f81c94f9<br>
b4104959a67175f02d6b415480be22a239f1f077<br>
997c9d6ae03ad114b2b57f04e9eeef17dcb82788
</b></blockquote>

When referring to an artifact using fossil, you can use a unique
prefix of the artifact ID that is four characters or longer.  This saves
a lot of typing.  When displaying artifact IDs, fossil will usually only
show the first 10 digits since that is normally enough to uniquely
identify a file.

Changing (or adding or removing) a single byte in a file results
in a completely different artifact ID.  And since the artifact ID is the name of
the artifact, making any change to a file results in a new artifact.
In this way, artifacts are immutable.






(new: lines 96-112)
6089f0b563a9db0a6d90682fe47fd7161ff867c8<br>
59712614a1b3ccfd84078a37fa5b606e28434326<br>
19dbf73078be9779edd6a0156195e610f81c94f9<br>
b4104959a67175f02d6b415480be22a239f1f077<br>
997c9d6ae03ad114b2b57f04e9eeef17dcb82788
</b></blockquote>

When referring to an artifact using Fossil, you can use a unique
prefix of the artifact ID that is four characters or longer.  This saves
a lot of typing.  When displaying artifact IDs, Fossil will usually only
show the first 10 digits since that is normally enough to uniquely
identify a file.

Changing (or adding or removing) a single byte in a file results
in a completely different artifact ID.  And since the artifact ID is the name of
the artifact, making any change to a file results in a new artifact.
In this way, artifacts are immutable.
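
A minimal usage sketch (not part of the committed document): any unique prefix
of four or more characters is accepted wherever an artifact ID is expected, so
the two commands below, using the first ID listed above, are equivalent.

    fossil info 6089f0b563a9db0a6d90682fe47fd7161ff867c8
    fossil info 6089f0
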
(old: lines 128-142)
Associated with every check-in is a special file called the
[./fileformat.wiki#manifest| "manifest"].  The manifest is a
listing of all other files in
that source tree.  The manifest contains the (complete) artifact ID 
of the file and the name of the file as it appears on disk,
and thus serves as a mapping from artifact ID to disk name.  The artifact ID
of the manifest is the identifier for the entire check-in.  When
you look at a "timeline" of changes in fossil, the ID associated
with each check-in or commit is really just the artifact ID of the
manifest for that check-in.

<p>The manifest file is not normally a real file on disk.  Instead,
the manifest is computed in memory by Fossil whenever it needs it.
However, the "fossil setting manifest on" command will cause the
manifest file to be materialized to disk, if desired.  Both Fossil






(new: lines 128-142)
Associated with every check-in is a special file called the
[./fileformat.wiki#manifest| "manifest"].  The manifest is a
listing of all other files in
that source tree.  The manifest contains the (complete) artifact ID 
of the file and the name of the file as it appears on disk,
and thus serves as a mapping from artifact ID to disk name.  The artifact ID
of the manifest is the identifier for the entire check-in.  When
you look at a "timeline" of changes in Fossil, the ID associated
with each check-in or commit is really just the artifact ID of the
manifest for that check-in.

<p>The manifest file is not normally a real file on disk.  Instead,
the manifest is computed in memory by Fossil whenever it needs it.
However, the "fossil setting manifest on" command will cause the
manifest file to be materialized to disk, if desired.  Both Fossil
(old: lines 167-260)
<li>A <b>check-in</b> is a set of files arranged
    in a hierarchy.</li>
<li>A <b>repository</b> keeps a record of historical check-ins.</li>
<li>Repositories share their changes using <b>push</b>, <b>pull</b>,
    <b>sync</b>, and <b>clone</b>.</li>
<li>A particular <u>version</u> of a particular file is an <b>artifact</b>
    that is identified by an <b>artifact ID</b>.</li>
<li>Artifacts tracked by fossil are inherently immutable.</li>
<li>Fossil automatically generates a <b>manifest</b> file that identifies
    every artifact in a check-in.</li>
<li>The artifact ID of the manifest is the identifier of the check-in.</li>
</ul>

<h2>3.0 Fossil - The Program</h2>

Fossil is software.  The implementation of fossil is in the form
of a single executable named "fossil" (or "fossil.exe" on Windows).
To install fossil on your system,
all you have to do is obtain a copy of this one executable file (either
by downloading a
<a href="http://www.fossil-scm.org/download.html">pre-compiled version</a>
or [./build.wiki | compiling it yourself]) and then
putting that file somewhere on your PATH.

Fossil is completely self-contained.  It is not necessary to
install any other software in order to use fossil.  You do <u>not</u> need
CVS, gzip, diff, rsync, Python, Perl, Tcl, Java, apache, PostgreSQL, MySQL,
SQLite, patch, or any similar software on your system in order to use
fossil effectively.  You will want to have some kind of text editor
for entering check-in comments.  Fossil will use whatever text editor
is identified by your VISUAL environment variable.  Fossil will also
use GPG to clearsign your manifests if you happen to have it installed,
but fossil will skip that step if GPG missing from your system.
You can optionally set up fossil to use external "diff" programs, 
though fossil has an excellent built-in "diff" algorithm that works
fine for most people.  If you happen to have Tcl/Tk installed on your
system, Fossil will use it to generate a graphical "diff" display when
you use the --tk option to the "diff" command, but this too is entirely
optional.


To uninstall fossil, simply delete the executable.

To upgrade an older version of fossil to a newer version, just
replace the old executable with the new one.  You might need to 
run "<b>fossil all rebuild</b>" to restructure your repositories after
an upgrade.  Running "all rebuild" never hurts, so when upgrading it
is a good policy to run it even if it is not strictly necessary.

To use fossil, simply type the name of the executable in your
shell, followed by one of the various built-in commands and
arguments appropriate for that command.  For example:

<blockquote><b>
fossil help
</b></blockquote>

In the next section, when we say things like "use the <b>help</b>
command" we mean to use the command name "help" as the first
token after the name of the fossil executable, as shown above.

<a name="workflow"></a>
<h2>4.0 Workflow</h2>

<img src="concept2.gif" align="right" hspace="10">

Fossil has two modes of operation: <i>"autosync"</i> and
<i>"manual-merge"</i>
Autosync mode is reminiscent of CVS or SVN in that it automatically
keeps your changes in synchronization with your co-workers through
the use of a central server.  The manual-merge mode is the standard workflow
for GIT or Mercurial in that your local repository develops
independently of your coworkers and you share and merge your changes manually.
An interesting feature of fossil is that it supports both autosync
and manual-merge work flows.

The default setting for fossil is to be in autosync mode.  You
can change the autosync setting or check the current autosync
setting using commands like:

<blockquote>
<b>fossil setting autosync on<br>
fossil setting autosync off<br>
<b>fossil settings</b>
</blockquote>

By default, fossil runs with autosync mode turned on.  The
authors finds that projects run more smoothly in autosync mode since
autosync helps to prevent pointless forking and merge and helps keeps
all collaborators working on exactly the same code rather than on their
own personal forks of the code.  In the author's view, manual-merge mode
should be reserved for disconnected operation.

<h3>4.1 Autosync Workflow</h3>






(new: lines 167-260)
<li>A <b>check-in</b> is a set of files arranged
    in a hierarchy.</li>
<li>A <b>repository</b> keeps a record of historical check-ins.</li>
<li>Repositories share their changes using <b>push</b>, <b>pull</b>,
    <b>sync</b>, and <b>clone</b>.</li>
<li>A particular <u>version</u> of a particular file is an <b>artifact</b>
    that is identified by an <b>artifact ID</b>.</li>
<li>Artifacts tracked by Fossil are inherently immutable.</li>
<li>Fossil automatically generates a <b>manifest</b> file that identifies
    every artifact in a check-in.</li>
<li>The artifact ID of the manifest is the identifier of the check-in.</li>
</ul>

<h2>3.0 Fossil - The Program</h2>

Fossil is software.  The implementation of Fossil is in the form
of a single executable named "fossil" (or "fossil.exe" on Windows).
To install Fossil on your system,
all you have to do is obtain a copy of this one executable file (either
by downloading a
<a href="http://www.fossil-scm.org/download.html">pre-compiled version</a>
or [./build.wiki | compiling it yourself]) and then
putting that file somewhere on your PATH.

Fossil is completely self-contained.  It is not necessary to
install any other software in order to use Fossil.  You do <u>not</u> need
CVS, gzip, diff, rsync, Python, Perl, Tcl, Java, apache, PostgreSQL, MySQL,
SQLite, patch, or any similar software on your system in order to use
Fossil effectively.  You will want to have some kind of text editor
for entering check-in comments.  Fossil will use whatever text editor
is identified by your VISUAL environment variable.  Fossil will also
use GPG to clearsign your manifests if you happen to have it installed,
but Fossil will skip that step if GPG is missing from your system.
You can optionally set up Fossil to use external "diff" programs, 
though Fossil has an excellent built-in "diff" algorithm that works
fine for most people.  If you happen to have Tcl/Tk installed on your
system, Fossil will use it to generate a graphical "diff" display when
you use the --tk option to the "diff" command, but this too is entirely
optional.


To uninstall Fossil, simply delete the executable.

To upgrade an older version of Fossil to a newer version, just
replace the old executable with the new one.  You might need to 
run "<b>fossil all rebuild</b>" to restructure your repositories after
an upgrade.  Running "all rebuild" never hurts, so when upgrading it
is a good policy to run it even if it is not strictly necessary.

To use Fossil, simply type the name of the executable in your
shell, followed by one of the various built-in commands and
arguments appropriate for that command.  For example:

<blockquote><b>
fossil help
</b></blockquote>

In the next section, when we say things like "use the <b>help</b>
command" we mean to use the command name "help" as the first
token after the name of the Fossil executable, as shown above.

<a name="workflow"></a>
<h2>4.0 Workflow</h2>

<img src="concept2.gif" align="right" hspace="10">

Fossil has two modes of operation: <i>"autosync"</i> and
<i>"manual-merge"</i>.
Autosync mode is reminiscent of CVS or SVN in that it automatically
keeps your changes in synchronization with your co-workers through
the use of a central server.  The manual-merge mode is the standard workflow
for Git or Mercurial in that your local repository develops
independently of your coworkers' repositories and you share and merge your
changes manually.
An interesting feature of Fossil is that it supports both the autosync
and manual-merge workflows.

The default setting for Fossil is to be in autosync mode.  You
can change the autosync setting or check the current autosync
setting using commands like:

<blockquote>
<b>fossil setting autosync on<br>
fossil setting autosync off<br>
fossil settings</b>
</blockquote>
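
Giving the setting name with no value displays its current value, which is
a quick way to check which mode a check-out is using (a usage sketch; the
exact output format depends on your Fossil version):

<blockquote><b>
fossil settings autosync
</b></blockquote>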

By default, Fossil runs with autosync mode turned on.  The
author finds that projects run more smoothly in autosync mode, since
autosync helps to prevent pointless forking and merging and helps keep
all collaborators working on exactly the same code rather than on their
own personal forks of the code.  In the author's view, manual-merge mode
should be reserved for disconnected operation.

<h3>4.1 Autosync Workflow</h3>
to exist in historical check-ins.)  Test your changes.
</li>

<li>
Create a new check-in using the <b>commit</b> command.  You will be prompted
for a check-in comment and also for your GPG key if you have GPG installed.
The commit copies the edits you have made in your local source
tree into your local repository.  After your commit completes, Fossil will
automatically <b>push</b> your changes back to the server
you cloned from or whatever server you most recently synced with.
</li>

<li>
When your coworkers make their own changes, you can merge those changes
into your local source tree using the <b>update</b> command.
</ol>
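
As a concrete illustration, a typical edit-test-commit cycle in autosync
mode might look like the following sketch (the file name and check-in
comment are only placeholders):

<blockquote><pre>
fossil update                      # merge in changes from coworkers
# ... edit src/feature.c and run your tests ...
fossil commit -m "Fix the widget"  # commit; autosync pushes to the server
</pre></blockquote>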

<h2>5.0 Setting Up A Fossil Server</h2>

With other configuration management software, setting up a server is
a lot of work and normally takes time, patience, and a lot of system
knowledge.  Fossil is designed to avoid this frustration.  Setting up
a server with Fossil is ridiculously easy.  You have four options:</p>

<ol>
<li><p><b>Stand-alone server.</b>
Simply run the [/help?cmd=server|fossil server] or
[/help?cmd=ui|fossil ui] command from the command-line.

<li><p><b>CGI.</b>
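For illustration, the CGI option amounts to installing a two-line script
in your web server's cgi-bin directory and making it executable (the paths
shown here are hypothetical):

<blockquote><pre>
#!/usr/bin/fossil
repository: /home/www/repos/myproject.fossil
</pre></blockquote>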
Changes to www/copyright-release.html.
<h1 align="center">
Fossil SCM Contributor Agreement
</h1>

<p>
This agreement applies to your contribution of material to the
Fossil Software Configuration Management System ("Fossil") that is
managed by Hipp, Wyrick &amp; Company, Inc. ("Hwaci") and
sets out the intellectual property rights you grant to Hwaci in the
contributed material.
The terms "contribution" and "contributed material" mean any source code,
object code, patch, tool, sample, graphic, specification, manual,
documentation, or any other material posted, submitted, or uploaded by
you to the Fossil project.
 The term "you" means the person identified
and signing at the bottom of this document.  If your contribution
is on behalf of a company, the term "you" also means the company
identified in the signature area below.

<ol>
     contribution as if each of us were the sole owners, and if one of us
     makes a derivative work of your contribution, the one who makes
     (or has made) the derivative work will be the sole owner of that
     derivative work.
<li> You agree that you will not assert any moral rights in your
     contribution against Hwaci, Hwaci's licensees or transferees, or
     any other user or consumer of your contribution.
<li> You agree that Hwaci may register a copyright in your contribution and
     exercise all ownership rights associated with it.
<li> You agree that neither you nor Hwaci has any duty to consult with,
     obtain the consent of, or pay or render an accounting to the other
     for any use or distribution of your contribution.
</ul>

<li><p>
     company (if applicable).
</ul>
</ol>

<p>By filling in the following information and signing your name,
you agree to be bound by all of the terms
set forth in this agreement.  Please print clearly.</p>

<center>
<p><table width="80%" border="1" cellpadding="0" cellspacing="0">
<tr><td width="20%" valign="top">Your name &amp; email:</td><td width="80%">

    <!-- Replace this line with your name and email --> &nbsp;<p>&nbsp;

</td></tr>
Changes to www/customgraph.md.
Fossil includes several options for changing the graph's style without having
to delve into CSS. These can be found in the details.txt file of your skin or
under Admin/Skins/Details in the web UI.

*   ###`timeline-arrowheads`

    Set this to `0` to hide arrowheads on primary child lines.

*   ###`timeline-circle-nodes`

    Set this to `1` to make check-in nodes circular instead of square.

*   ###`timeline-color-graph-lines`

    Set this to `1` to colorize primary child lines.
## <a id="pos-elems"></a>Positioning Elements

These elements aren't intended to be seen. They're only used to help position
the graph and its visible elements.

*   ###<a id="tl-canvas"></a>`.tl-canvas`

    Set the left and right margins on this class to give the desired amount
    of space between the graph and its adjacent columns in the timeline.

    #### Additional Classes

    * `.sel`: See [`.tl-node`](#tl-node) for more information.

*   ###<a id="tl-rail"></a>`.tl-rail`

    Think of rails as invisible vertical lines on which check-in nodes are
    placed. The more simultaneous branches in a graph, the more rails required
    to draw it. Setting the `width` property on this class determines the
These are the elements you can actually see on the timeline graph: the nodes,
arrows, and lines. Each of these elements may also have additional classes
attached to them, depending on their context.

*   ###<a id="tl-node"></a>`.tl-node`

    A node exists for each check-in in the timeline.

    #### Additional Classes

    *   `.leaf`: Specifies that the check-in is a leaf (i.e. that it has no
        children in the same branch).

    *   `.merge`: Specifies that the check-in contains a merge.

    *   `.sel`: When the user clicks a node to designate it as the beginning
        of a diff, this class is added to both the node itself and the
        [`.tl-canvas`](#tl-canvas) element. The class is removed from both
        elements when the node is clicked again.

*   ###<a id="tl-arrow"></a>`.tl-arrow`

    Arrows point from parent nodes to their children. Technically, this
    class is just for the arrowhead. The rest of the arrow is composed
    of [`.tl-line`](#tl-line) elements.

    There are six additional classes that are used to distinguish the different
    types of arrows. However, only these combinations are valid:

    *   `.u`: Up arrow that points to a child from its primary parent.

    *   `.u.sm`: Smaller up arrow, used when there is limited space between
        parent and child nodes.

    *   `.merge.l` or `.merge.r`: Merge arrow pointing either to the left or
        right.

    *   `.warp`: A timewarped arrow (always points to the right), used when a
        misconfigured clock makes a check-in appear to have occurred before its
        parent ([example](https://www.sqlite.org/src/timeline?c=2010-09-29&nd)).

*   ###<a id="tl-line"></a>`.tl-line`

    Along with arrows, lines connect parent and child nodes. Line thickness is
    determined by the `width` property, regardless of whether the line is
    horizontal or vertical. You can also use borders to create special line
    styles. Here's a CSS snippet for making dotted merge lines:

        .tl-line.merge {
          width: 0;
          background: transparent;
          border: 0 dotted #000;
        }
        .tl-line.merge.h {
          border-top-width: 1px;
        }
        .tl-line.merge.v {
          border-left-width: 1px;
        }

    #### Additional Classes

    *   `.merge`: A merge line.

    *   `.h` or `.v`: Horizontal or vertical.

    *   `.warp`: A timewarped line.


## <a id="default-css"></a>Default Timeline Graph CSS

    .tl-canvas {
      margin: 0 6px 0 10px;
Changes to www/customskin.md.
"footer.txt", and "header.txt",
that describe the CSS, rendering options,
footer, and header for that skin, respectively.

The skin of a repository can be changed to any of the built-in skins using
the web interface by going to the /setup_skin web page (requires Admin
privileges) and clicking the appropriate button.  Or, the --skin command
line option can be used for the
[fossil ui](../../../help?cmd=ui) or
[fossil server](../../../help?cmd=server) commands to force that particular
instance of Fossil to use the specified built-in skin.

Sharing Skins
-------------

The skin of a repository is not part of the versioned state and does not
"push" or "pull" like checked-in files.  The skin is local to the
repository.  However, skins can be shared between repositories using
the [fossil config](../../../help?cmd=configuration) command.
The "fossil config push skin" command will send the local skin to a remote
repository and the "fossil config pull skin" command will import a skin
from a remote repository.  The "fossil config export skin FILENAME"
will export the skin for a repository into a file FILENAME.  This file
can then be imported into a different repository using the
the skin of the repository from which it was cloned.
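
For example, the skin of one local repository can be written to a file and
the same skin sent to a remote server like this (the file name and URL are
hypothetical):

    # write the skin of this repository into a file
    fossil config export skin skin.txt
    # send the local skin directly to a remote repository
    fossil config push skin https://example.com/myproject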

Header And Footer Processing
----------------------------

The header.txt and footer.txt files of a skin are merely the HTML text
of the header and footer.  However, before being prepended and appended to
the content, the header and footer text are run through a
[TH1 interpreter](./th1.md) that might adjust the text as follows:

  *  All text within &lt;th1&gt;...&lt;/th1&gt; is elided from the
     output and that text is instead run as a TH1 script.  That TH1
     script has the opportunity to insert new text in place of itself,
     or to inhibit or enable the output of subsequent text.

and for all scripts contained within them both.  Hence, any global
TH1 variables that are set by the header are available to the footer.

TH1 Variables
-------------

Before expanding the TH1 within the header and footer, Fossil first
initializes a number of TH1 variables to values that depend on
repository settings and the specific page being generated.

   *   **project_name** - The project_name variable is filled with the
       name of the project as configured under the Admin/Configuration
       menu.

   *   **title** - The title variable holds the title of the page being
   *   **secureurl** - The same as $baseurl except that if the scheme is
                       "http:" it is changed to "https:"

   *   **home** - The $baseurl without the scheme and hostname.  For example,
       if the $baseurl is "http://projectX.com/cgi-bin/fossil" then the
       $home will be just "/cgi-bin/fossil".

   *   **index_page** - The landing page URI as
       specified by the Admin/Configuration setup page.

   *   **current_page** - The name of the page currently being processed,
       without the leading "/" and without query parameters.
       Examples:  "timeline", "doc/trunk/README.txt", "wiki".

   *   **csrf_token** - A token used to prevent cross-site request forgery.

   *   **release_version** - The release version of Fossil.  Ex: "1.31"

   *   **manifest_version** - A prefix on the SHA1 check-in hash of the
       specific version of fossil that is running.  Ex: "\[47bb6432a1\]"
Changes to www/fileformat.wiki.
is derived.  Each argument is a 40-character lowercase
hexadecimal SHA1 of the predecessor manifest.  All arguments
to the P-card must be unique to that line.
The first predecessor is the direct ancestor of the manifest.
Other arguments define manifests with which the first was
merged to yield the current manifest.  Most manifests have
a P-card with a single argument.  The first manifest in the
project has no ancestors and thus has no P-card or (depending
on the Fossil version) an empty P-card (no arguments).
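
For example, a check-in created by merging one branch into another would
carry a P-card of the following form, where the first hash names the direct
ancestor and the second names the merged-in check-in (both hashes here are
placeholders, not real artifact IDs):

<blockquote><pre>
P 1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d7e8f9a0b 0f9e8d7c6b5a4f3e2d1c0b9a8f7e6d5c4b3a2f1e
</pre></blockquote>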

A manifest has zero or more Q-cards.  A Q-card is similar to a P-card
in that it defines a predecessor to the current check-in.  But
whereas a P-card defines the immediate ancestor or a merge
ancestor, the Q-card is used to identify a single check-in or a small
range of check-ins which were cherry-picked for inclusion in or
exclusion from the current manifest.  The first argument of
Changes to www/fossil-v-git.wiki.
<title>Fossil Versus Git</title>

<h2>1.0 Don't Stress!</h2>

If you start out using one DVCS and later decide you like the other better,
it is [./inout.wiki | easy to change].


But it also helps to be informed about the differences between
[http://git-scm.com | Git] and Fossil.  See the table below for
a high-level summary and the text that follows for more details.

Keep in mind that you are reading this on a Fossil website,
so the information here
might be biased in favor of Fossil.  Ask around with people who have
used both Fossil and Git for other opinions.





<h2>2.0 Executive Summary:</h2>

<blockquote><center><table border=1 cellpadding=5>
<tr><th width="50%">GIT</th><th width="50%">FOSSIL</th></tr>
<tr><td>File versioning only</td>
    <td>Versioning, Tickets, Wiki, and Technotes</td></tr>
<tr><td>Sharding</td><td>Replicating</td></tr>
<tr><td>Developer branches</td><td>Feature branches</td></tr>

<tr><td>Complex</td><td>Intuitive</td></tr>
<tr><td>Separate web tools</td><td>Integrated Web interface</td></tr>
<tr><td>Lots of little tools</td><td>Single executable</td></tr>
<tr><td>Pile-of-files repository</td>
    <td>Single-file relational database</td></tr>
<tr><td>One check-out per repository</td>
    <td>Many check-outs per repository</td></tr>

<tr><td>Uses "<tt>rebase</tt>"</td><td>Immutable</td></tr>
<tr><td>GPL</td><td>BSD</td></tr>
</table></center></blockquote>

<h2>3.0 Discussion</h2>

<h3>3.1 Feature Set</h3>

Git provides file versioning services only, whereas Fossil adds an
integrated [./wikitheory.wiki | wiki],
[./bugtheory.wiki | ticketing &amp; bug tracking],
[./embeddeddoc.wiki | embedded documentation], and
[./event.wiki | Technical notes].
These additional capabilities are available for Git as 3rd-party
user-installed add-ons, but with Fossil they are integrated into
the design.  One way to describe Fossil is that it is
"[https://github.com/ | github]-in-a-box".

<h3>3.2 Sharding versus Replicating</h3>



Git makes it easy for each repository in a project to hold a subset of
the branches for that project.  In fact, it is entirely possible and not
uncommon for no repository in the project to hold all the different code
versions for a project.  Instead the information is distributed.
Individual developers have one or more private branches.  A hierarchy


of integrators merge changes from individual developers into collaborative
branches, until all the changes are merged together at the top-level master
branch.  And all of this can be accomplished without having to have all the
code in any one repository.  Developers or groups of developers can share

only those branches that they want to share and keep other branches of the
project private.  This is analogous to sharding a distributed database.

Fossil allows private branches, but its default mode is to share everything.
And so in a Fossil project, all repositories tend to contain all of the
content at all times.  This is analogous to replication in a
distributed database.

The Git model works best for large projects, like the
Linux kernel for which Git was designed.
Linus Torvalds does not need or want to see a thousand
different branches, one for each contributor.  Git allows intermediary
"gate-keepers" to merge changes from multiple lower-level developers
into a single branch and only present Linus with a handful of branches
at a time.  Git encourages a programming model where each developer

works in his or her own branch and then merges changes up the hierarchy
until they reach the master branch.








Fossil is designed for smaller and non-hierarchical teams where all
developers are operating directly on the master branch, or at most



a small number of well-defined branches.
The [./concepts.wiki#workflow | autosync] mode of Fossil makes it easy






for multiple developers to work on a single branch and maintain



linear development on that branch and avoid needless forking

and merging.

<h3>3.3 Branches</h3>


Git (and especially GitHub) encourages a workflow where each developer
has his or her own branch or branches.  Developers then send "pull requests"
to have their changes be merged into "official" branches by integrators.
For example, the Linux kernel team has a hierarchy of integrators with
Linus Torvalds at the root.  Individual developers each have their own
private branches of the source tree into which they make their own changes.
They then encourage first-tier integrators to pull those changes.  The
first-tier integrators merge together changes from multiple contributors
then try to get second-tier integrators to pull their branches.  The
changes merge up the hierarchy until (hopefully) they are pulled into
"Linus's branch", at which time they become part of the "official" Linux.



In Git, each branch is "owned" by the person who creates it and works
on it.  The owner might pull changes from others, but the owner is always
in control of the branch.  Branches are developer-centric.





Fossil, on the other hand, encourages a workflow where branches are
associated with features or releases, not individual developers.

All developers share all branches in common, and two
or more developers can and often do intersperse commits onto the same branch.
Branches do not belong to individuals.  All branches are read/write

accessible to all developers at all times.  There is no need
for integrators to merge together changes from various independent


developers.  Instead, all of the developers work together cooperatively

and the changes stay integrated naturally.



So to a first approximation, branches in Git are developer-centric whereas
branches in Fossil are feature-centric.


The Git approach scales much better for large projects like the Linux
kernel with thousands of contributors who in many cases don't even know
each other's names.  The integrators serve a gatekeeper role to help keep
undesirable code out of the official Linux source tree.  On the other hand,
not many projects are as big or as loosely organized as the Linux kernel.
Most projects have a small team of developers who all know each other
well and trust each other, and who enjoy working together collaboratively
without the overhead and hierarchy of integrators.



One consequence of the "everybody-sees-everything" focus of Fossil is that
branch names are global and are part of the distributed and synchronized
content of a Fossil repository, rather than being private and user-specific
as they are in Git.






<h3>3.4 Complexity</h3>



Git is a complex system.  It can be tricky to use and requires a fair
amount of knowledge and experience to master.  Fossil strives to be
a much simpler system that can be learned and mastered much more quickly.
Fossil strives to have fewer "gotchas" and quirks that can trip up a
developer.


The ideal VCS should just get out of the way of the developer and allow
the developer to focus 100% of their thinking on the project under
development.  One should not have to stop and think about how to operate
the VCS.  Of course, no VCS is ideal.  Every VCS requires the developer
to think about version control to some extent.  But one wants to minimize
the thinking about version control.





Git requires the developer to maintain a more complex mental model than
most other DVCSes.  Git takes longer to learn.  And you have to spend
more time thinking about what you are doing with Git.



Fossil strives for simplicity.  Fossil wants to be easy to learn and to
require little thinking about how to operate it.

[./quotes.wiki | Reports from the field]
indicate that Fossil is mostly successful at this effort.


Fossil will <u>never</u> get you into anything like the
"disconnected head state" which has frustrated so many Git users.



<h3>3.5 Web Interface</h3>

Git has a web interface, but it requires a fair amount of setup and an
external web server.  Fossil comes with a fully functional
[./webui.wiki | built-in web-server]
and a really simple mechanism (the "[/help/ui|fossil ui]" command) to
automatically start the web server and bring up a web browser to navigate
it.  The web interface for Fossil is not only easier to set up, it is also
more powerful and easier to use.  The web interface to Fossil is a practical
replacement to the 3rd-party "GUI Tools" that users often employ to operate
Git.

<h3>3.6 Implementation Strategy</h3>





Git consists of a collection of many little programs.  Git needs to be
"installed" using some kind of installer or package tool.  Git can be
tricky to install and get working, especially for users without
administrative privileges.


Fossil is a single self-contained executable.  To "install" Fossil one
has merely to download a precompiled binary and place that binary
somewhere on $PATH.  To uninstall Fossil, simply delete the binary.
To upgrade Fossil, replace the old binary with a new one.




Fossil is designed to be trivial to install, uninstall, and upgrade so
that developers can spend more time working on their own projects and

much less time configuring their version control system.



<h3>3.7 Repository Storage</h3>



A Git repository is a "pile-of-files" in the ".git" directory at the
root of the working checkout.  There is a one-to-one correspondence
between repositories and working checkouts.  A power-loss or system crash
in the middle of Git operation can damage or corrupt the Git repository.

A Fossil repository consists of a single disk file.  A single Fossil
repository can serve multiple simultaneous working checkouts.
A Fossil repository is an SQLite database, so it is highly resistant
to damage from a power-loss or system crash - incomplete transactions
are simply rolled back after the system reboots.

<h3>3.8 Check-outs Per Repository</h3>

In Git, a check-out and a repository are joined in a fundamental way
so that only a single version of the project history, or a single branch,

can be open at once.  If you have a project with multiple branches and
you want to have two or more branches open at the same time (perhaps to
do performance comparisons, or maybe to run simultaneous builds using
different compile-time options) then in Git you actually have to create
a new clone of the repository for each open checkout.


In Fossil, the repository and the check-out are distinct entities and
so a single repository can support multiple simultaneous checkouts.



This feature is <em>extensively</em> used by the Fossil developers

themselves.  Perhaps we are biased, but we do not understand how anyone
can work efficiently with just one check-out per repository.


<h3>3.9 Audit Trail</h3>





Git features the "rebase" command which can be used to change the
sequence of check-ins in the repository.  Rebase can be used to "clean up"

a complex sequence of check-ins to make their intent easier for others
to understand.  This is important if you view the history of a project
as part of the documentation for the project.

Fossil takes an opposing view.  Fossil views history as sacrosanct and
stubbornly refuses to change it.
Fossil allows mistakes to be corrected (for example, check-in comments
can be revised, and check-ins can be moved onto new branches even after
the check-in has occurred) but the correction is an addition to the repository
and the original actions are preserved and displayed alongside
the corrections, thus preserving an historically accurate audit trail.
This is analogous to an accounting practice of marking through an incorrect
entry in a ledger and writing a correction beside it.



To put it another way, Git remembers what you should have done whereas
Fossil remembers what you actually did.



The lack of a "rebase" command and the inability to rewrite history
is considered a feature of Fossil, not an omission or bug.






<h3>3.10 License</h3>


Both Git and Fossil are open-source.  Git is under

[http://www.gnu.org/licenses/gpl.html | GPL] whereas Fossil is
under the
[http://en.wikipedia.org/wiki/BSD_licenses | two-clause BSD license].
The different licenses parallel, to some extent, the different philosophies
of Git and Fossil.

There are exceptions on both sides, but to a first approximation, Git
works better for GPL projects and Fossil works better for BSD projects.

The GPL is designed to provide a very contributor-friendly environment.
No legal paperwork is needed to contribute to a GPL project because
the GPL is cleverly designed so that the act of contributing
to the project (or even reading the code for the project) constitutes
an acceptance of the licensing terms.  GPL encourages a bazaar-style
development model, with lots of anonymous programmers contributing
drive-by patches.  The theory is that with many eyeballs, all bugs
are shallow.  Surprisingly, this has actually been demonstrated to
work in many well-known projects.

The BSD-style licenses are more user-friendly.  BSD-style licenses
place fewer restrictions on the users of the software at the expense 
of making it more difficult to contribute changes or enhancements.
To protect against IP claims,
every contributor to a BSD-style project must sign legal documents in
which they agree to release their contributions under the same license.
(Some BSD-licensed projects omit this formality, but do so at their peril.)
A BSD-style license encourages a more cathedral-style approach to development.
There is a small team of developers.  Drive-by patches and anonymous
contributors are discouraged and/or prohibited.  Contributors are expected
to be experts and be available to support their changes for the long-term.






<title>Fossil Versus Git</title>

<h2>1.0 Don't Stress!</h2>

If you start out using one DVCS and later decide you like the other better,
you can easily [./inout.wiki | move your content]&#185;.

Fossil and [http://git-scm.com | Git] are very similar in many respects,
but there are also important differences.
See the table below for
a high-level summary and the text that follows for more details.

Keep in mind that you are reading this on a Fossil website,
so the information here
might be biased in favor of Fossil.  Ask around with people who have
used both Fossil and Git for other opinions.

&#185;<small><i>Git does not support
wiki, tickets, or tech-notes, so those elements will not transfer when
exporting from Fossil to Git.</i></small>

<h2>2.0 Executive Summary:</h2>

<blockquote><center><table border=1 cellpadding=5>
<tr><th width="50%">GIT</th><th width="50%">FOSSIL</th></tr>
<tr><td>File versioning only</td>
    <td>Versioning, Tickets, Wiki, and Technotes</td></tr>
<tr><td>Ad-hoc, pile-of-files key/value database</td>
    <td>Relational SQL database</td></tr>
<tr><td>Bazaar-style development</td><td>Cathedral-style development</td></tr>
<tr><td>Designed for Linux development</td>
    <td>Designed for SQLite development</td></tr>
<tr><td>Lots of little tools</td><td>Stand-alone executable</td></tr>


<tr><td>One check-out per repository</td>
    <td>Many check-outs per repository</td></tr>
<tr><td>Remembers what you should have done</td>
    <td>Remembers what you actually did</td></tr>
<tr><td>GPL</td><td>BSD</td></tr>
</table></center></blockquote>

<h2>3.0 Discussion</h2>

<h3>3.1 Feature Set</h3>

Git provides file versioning services only, whereas Fossil adds an
integrated [./wikitheory.wiki | wiki],
[./bugtheory.wiki | ticketing &amp; bug tracking],
[./embeddeddoc.wiki | embedded documentation], and
[./event.wiki | Technical notes].
These additional capabilities are available for Git as 3rd-party and/or
user-installed add-ons, but with Fossil they are integrated into
the design.  One way to describe Fossil is that it is
"[https://github.com/ | github]-in-a-box".

If you clone Git's self-hosting repository you get just Git's source code.
If you clone Fossil's self-hosting repository, you get the entire
Fossil website - source code, documentation, ticket history, and so forth.





For developers who choose to self-host projects (rather than using a
3rd-party service such as GitHub) Fossil is much easier to set up, since
the stand-alone Fossil executable together with a 2-line CGI script
suffice to instantiate a full-featured developer website.  To accomplish
the same using Git requires locating, installing, configuring, integrating, 
and managing a wide assortment of separate tools.  Standing up a developer
website using Fossil can be done in minutes, whereas doing the same using
Git requires hours or days.
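
For instance, obtaining a complete local copy of a Fossil-hosted project
(code, tickets, wiki, and documentation) and browsing it takes only two
commands (the clone URL is shown for illustration):

<blockquote><pre>
fossil clone https://www.fossil-scm.org/ fossil.fossil
fossil ui fossil.fossil
</pre></blockquote>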

<h3>3.2 Database</h3>

The baseline data structures for Fossil and Git are the same (modulo

formatting details).  Both systems store check-ins as immutable
objects referencing their immediate ancestors and named by their SHA1 hash.

The difference is that Git stores its objects as individual files 
in the ".git" folder or compressed into
bespoke "pack-files", whereas Fossil stores its objects in a 
relational ([https://www.sqlite.org/|SQLite]) database file.  To put it
another way, Git uses an ad-hoc pile-of-files key/value database whereas

Fossil uses a proven, general-purpose SQL database.  This
difference is more than an implementation detail.  It
has important consequences.

With Git, one can easily locate the ancestors of a particular check-in
by following the pointers embedded the check-in object, but it is
difficult to go the other direction and locate the descendants of a
check-in.  It is so difficult, in fact, that neither native Git nor
GitHub provide this capability.  With Git, if you are looking at some
historical check-in then you cannot ask
"what came next" or "what are the children of this check-in".


Fossil, on the other hand, parses essential information about check-ins
(parents, children, committers, comments, files changed, etc.) 
into a relational database that can be easily 
queried using concise SQL statements to find both ancestors and 
descendants of a check-in.

Leaf check-ins in Git that lack a "ref" become "detached", making them
difficult to locate and subject to garbage collection.  This
"detached head" problem has caused untold grief for countless
Git users.  With Fossil, all check-ins are easily located using
a variety of attributes (parents, children, committer, date, full-text
search of the check-in comment) and so detached heads are simply not possible.

The ease with which check-ins can be located and queried in Fossil
has resulted in a huge variety of reports and status screens 
([./webpage-ex.md|examples]) that show project state
in ways that help developers
maintain enhanced awareness and comprehension
and avoid errors.

<h3>3.3 Cathedral vs. Bazaar</h3>

Fossil and Git promote different development styles.  Git promotes a
"bazaar" development style in which numerous anonymous developers make






small and sometimes haphazard contributions.  Fossil


promotes a "cathedral" development model in which the project is
closely supervised by a highly engaged architect and implemented by
a clique of developers.




Nota Bene:  This is not to say that Git cannot be used for cathedral-style
development or that Fossil cannot be used for bazaar-style development.
They can be.  But those modes are not their design intent nor their
low-friction path.


Git encourages a style in which individual developers work in relative
isolation, maintaining their
own branches and occasionally rebasing and pushing selected changes up
to the main repository.  Developers using Git often have their own
private branches that nobody else ever sees.  Work becomes siloed.
This is exactly what one wants when doing bazaar-style development.

Fossil, in contrast, strives to keep all changes from all contributors
mirrored in the main repository (in separate branches) at all times.
Work in progress from one developer is readily visible to all other
developers and to the project leader, well before the code is ready
to integrate.  Fossil places a lot of emphasis on reporting the state
of the project, and the changes underway by all developers, so that
all developers and especially the project leader can maintain a better
mental picture of what is happening, and better situational awareness.



<h3>3.4 Linux vs. SQLite</h3>









Git was specifically designed to support the development of Linux.
Fossil was specifically designed to support the development of SQLite.




Both SQLite and Linux are important pieces of software.
SQLite is found on far more systems than Linux.  (Almost every Linux
system uses SQLite, but there are many non-Linux systems such as
iPhones, PlayStations, and Windows PC that use SQLite.)  On the other
hand, for those systems that do use Linux, Linux is a far more important
component.

Linux uses a bazaar-style development model.  There are thousands and
thousands of contributors, most of whom do not know each other's names.
Git is designed for this scenario.



SQLite uses cathedral-style development.  95% of the code in SQLite
comes from just three programmers, 64% from just the lead developer.
And all SQLite developers know each other well and interact daily.
Fossil is designed for this development model.


<h3>3.5 Lots of little tools vs. Self-contained system</h3>

Git consists of many small tools, each doing one small part of the job,
which can be recombined (by experts) to perform powerful operations.
Git has a lot of complexity and many dependencies and requires an "installer"
script or program to get it running.

Fossil is a single self-contained stand-alone executable with hardly 
any dependencies.  Fossil can be (and often is) run inside a 
minimally configured chroot jail.  To install Fossil,
one merely puts the executable on $PATH.
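
In other words, installation can be as simple as the following sketch
(the destination directory is just an example):

<blockquote><pre>
cp fossil /usr/local/bin/      # install
rm /usr/local/bin/fossil       # uninstall
</pre></blockquote>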



The designer of Git says that the unix philosophy is to have lots of 
small tools that collaborate to get the job done.  The designer of 
Fossil says that the unix philosophy is "it just works".  Both 

individuals have written their DVCSes to reflect their own view 
of the "unix philosophy".

<h3>3.6 One vs. Many Check-outs per Repository</h3>



A "repository" in Git is a pile-of-files in the ".git" subdirectory
of a single check-out.  The check-out and the repository are inseparable.


With Fossil, a "repository" is a single SQLite database file 	
that can be stored anywhere.  There
can be multiple active check-outs from the same repository, perhaps
open on different branches or on different snapshots of the same branch.
Long-running tests or builds can be running in one check-out while




changes are being committed in another.
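
A sketch of that arrangement (the branch and directory names are
hypothetical):

<blockquote><pre>
fossil clone https://example.com/myproject myproject.fossil
mkdir trunk-co && cd trunk-co && fossil open ../myproject.fossil trunk
cd .. && mkdir rel-co && cd rel-co && fossil open ../myproject.fossil release-1.0
</pre></blockquote>

Both working directories are backed by the same myproject.fossil file, so
a check-in made in one becomes available to the other with a simple
<b>fossil update</b>.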

<h3>3.7 What you should have done vs. What you actually did</h3>

Git puts a lot of emphasis on maintaining
a "clean" check-in history.  Extraneous and experimental branches by
individual developers often never make it into the main repository.  And
branches are often rebased before being pushed, to make
it appear as if development had been linear.  Git strives to record what
the development of a project should have looked like had there been no

mistakes.

Fossil, in contrast, puts more emphasis on recording exactly what happened,
including all of the messy errors, dead-ends, experimental branches, and
so forth.  One might argue that this
makes the history of a Fossil project "messy".  But another point of view
is that this makes the history "accurate".  In actual practice, the 
superior reporting tools available in Fossil mean that the added "mess"
is not a factor.

One commentator has mused that Git records history according to
the victors, whereas Fossil records history as it actually happened.

<h3>3.8 GPL vs. BSD</h3>

Git is covered by the GPL license whereas Fossil is covered by 
a two-clause BSD license.

Consider the difference between GPL and BSD licenses:  GPL is designed
to make writing easier at the expense of making reading harder.  BSD is
designed to make reading easier at the expense of making writing harder.



To a first approximation, the GPL license grants the right to read 
source code to anyone who promises to give back enhancements.  In other
words, the act of reading GPL source code (a prerequisite for making changes)
implies acceptance of the license which requires updates to be contributed
back under the same license.  (The details are more complex, but the
foregoing captures the essence of the idea.)  A big advantage of the GPL
is that anybody can contribute to the code without having to sign additional
legal documentation because they have implied their acceptance of the GPL
license by the very act of reading the source code.  This means that a GPL
project can legally accept anonymous and drive-by patches.

The BSD licenses, on the other hand, make reading much easier than the GPL,
because the reader need not surrender proprietary interest
in their own enhancements.  On the flip side, BSD and similarly licensed
projects must obtain legal affidavits from authors before

new content can be added into the project.  Anonymous and drive-by

patches cannot be accepted.  This makes signing up new contributors for
BSD licensed projects harder.

The licenses on the implementations of Git and Fossil only apply to the
implementations themselves, not to the projects which the systems store.
Nevertheless, one can see a more GPL-oriented world-view in Git and a
more BSD-oriented world-view in Fossil.  Git encourages anonymous contributions
and siloed development, which are hallmarks of the GPL/bazaar approach to
software, whereas Fossil encourages a more tightly collaborative,
cliquish, cathedral-style approach more typical of BSD-licensed projects.


<h2>4.0 Missing Features</h2>

Most of the capabilities found in Git are also available in Fossil and
the other way around. For example, both systems have local check-outs,
remote repositories, push/pull/sync, bisect capabilities, and a "stash".
Both systems store project history as a directed acyclic graph (DAG) 
of immutable check-in objects.



But there are a few capabilities in one system that are missing from the
other.



<h3>4.1 Features found in Fossil but missing from Git</h3>


  *  <b>The ability to show descendants of a check-in.</b>

   Both Git and Fossil can easily find the ancestors of a check-in.  But
   only Fossil shows the descendants.  (It is possible to find the
   descendants of a check-in in Git using the log, but that is sufficiently
   difficult that nobody ever actually does it.)

  *  <b>Wiki, Embedded documentation, Trouble-tickets, and Tech-Notes</b>


   Git only provides versioning of source code.  Fossil strives to provide
   other related configuration management services as well.

  *  <b>Named branches</b>

   Branches in Fossil have persistent names that are propagated 
   to collaborators via [/help?cmd=push|push] and [/help?cmd=pull|pull].
   All developers see the same name on the same branch.  Git, in contrast,
   uses only local branch names, so developers working on the
   same project can (and frequently do) use a different name for the
   same branch.  


  *  <b>The [/help?cmd=all|fossil all] command</b>

   Fossil keeps track of all repositories and check-outs and allows
   operations over all of them with a single command.  For example, in
   Fossil it is possible to request a pull of all repositories on a laptop
   from their respective servers, prior to taking the laptop off network.
   Or it is possible to do "fossil all status" to see if there are any
   uncommitted changes that were overlooked prior to the end of the workday.
   (See the sketch after this list.)

  *  <b>The [/help?cmd=ui|fossil ui] command</b>

   Fossil supports an integrated web interface.  Some of the same features
   are available using third-party add-ons for Git, but they do not provide
   nearly as many features and they are not nearly as convenient to use.
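
As a quick illustration of the "fossil all" item above, here is a minimal
sketch of an end-of-day routine.  No repository names or URLs are needed
because the command operates on every repository Fossil already knows about
on the machine:

<blockquote><pre>
# before leaving the network, pull updates into every local repository
fossil all pull

# then look for uncommitted changes in every local check-out
fossil all status
</pre></blockquote>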


<h3>4.2 Features found in Git but missing from Fossil</h3>

  *  <b>Rebase</b>

   Because of its emphasis on recording history exactly as it happened,
   rather than as we would have liked it to happen, Fossil deliberately
   does not provide a "rebase" command.  One can rebase manually in Fossil,
   with sufficient perseverance, but it is not something that can be done with
   a single command.

  *  <b>Push or pull a single branch</b>

   The [/help?cmd=push|fossil push], [/help?cmd=pull|fossil pull], and
   [/help?cmd=sync|fossil sync] commands do not provide the capability to
   push or pull individual branches.  Pushing and pulling in Fossil is
   all or nothing.  This is in keeping with Fossil's emphasis on maintaining
   a complete record and on sharing everything between all developers.
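
To make the contrast concrete, here is a minimal sketch (the project URL and
branch name below are placeholders):

<blockquote><pre>
# Fossil: push, pull, and sync always exchange every branch with the remote
fossil sync https://example.org/myproject

# Git, by contrast, can push a single named branch on its own
git push origin feature-branch
</pre></blockquote>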
Changes to www/fossil_prompt.sh.
#-------------------------------------------------------------------------
#   get_fossil_data()
#
# If the current directory is part of a fossil checkout, then populate
# a series of global variables based on the current state of that
# checkout. Variables are populated based on the output of the [fossil info]
# command.
#
# If the current directory is not part of a fossil checkout, set global
# variable $fossil_info_project_name to an empty string and return.
#
function get_fossil_data() {
  fossil_info_project_name=""
  eval `get_fossil_data2`
}
function get_fossil_data2() {
  fossil info 2> /dev/null | sed 's/"//g'|grep "^[^ ]*:" | while read LINE ; do
    local field=`echo $LINE | sed 's/:.*$//' | sed 's/-/_/'`
    local value=`echo $LINE | sed 's/^[^ ]*: *//'`
    echo fossil_info_${field}=\"${value}\"
  done
}

#-------------------------------------------------------------------------
#   set_prompt()
#
# Set the PS1 variable. If the current directory is part of a fossil
# checkout then the prompt contains information relating to the state
# of the checkout.
#
# Otherwise, if the current directory is not part of a fossil checkout, it
# is set to a fairly standard bash prompt containing the host name, user
# name and current directory.
#
function set_prompt() {
  get_fossil_data
  if [ -n "$fossil_info_project_name" ] ; then
    project=$fossil_info_project_name
    checkout=`echo $fossil_info_checkout | sed 's/^\(........\).*/\1/'`
    date=`echo $fossil_info_checkout | sed 's/^[^ ]* *..//' | sed 's/:[^:]*$//'`
    tags=$fossil_info_tags
    local_root=`echo $fossil_info_local_root | sed 's/\/$//'`
    local=`pwd | sed "s*${local_root}**" | sed "s/^$/\//"`

Changes to www/index.wiki.
     available for discussing Fossil issues.
  *  [./stats.wiki | Performance statistics] taken from real-world projects
     hosted on Fossil.
  *  How to [./shunning.wiki | delete content] from a Fossil repository.
  *  How Fossil does [./password.wiki | password management].
  *  On-line [/help | help].
  *  Documentation on the
     [http://www.sqliteconcepts.org/THManual.pdf | TH1 scripting language] used

     to configure the ticketing subsystem and the [./th1.md | extra commands]
     provided by Fossil itself.

  *  A free hosting server for Fossil repositories is available at
     [http://chiselapp.com/].
  *  How to [./server.wiki | set up a server] for your repository.
  *  Customizing the [./custom_ticket.wiki | ticket system].
  *  Methods to [./checkin_names.wiki | identify a specific check-in].
  *  [./inout.wiki | Import and export] from and to Git.
  *  [./fossil-v-git.wiki | Fossil versus Git].
     available for discussing Fossil issues.
  *  [./stats.wiki | Performance statistics] taken from real-world projects
     hosted on Fossil.
  *  How to [./shunning.wiki | delete content] from a Fossil repository.
  *  How Fossil does [./password.wiki | password management].
  *  On-line [/help | help].
  *  Documentation on the
     [http://www.sqliteconcepts.org/THManual.pdf | TH1 scripting language],
     used to customize [./custom_ticket.wiki | ticketing], and several other
     subsystems, including [./customskin.md | theming].
  *  List of [./th1.md | TH1 commands provided by Fossil itself] that expose
     its key functionality to TH1 scripts.
  *  A free hosting server for Fossil repositories is available at
     [http://chiselapp.com/].
  *  How to [./server.wiki | set up a server] for your repository.
  *  Customizing the [./custom_ticket.wiki | ticket system].
  *  Methods to [./checkin_names.wiki | identify a specific check-in].
  *  [./inout.wiki | Import and export] from and to Git.
  *  [./fossil-v-git.wiki | Fossil versus Git].
Changes to www/makefile.wiki.
The src/ subdirectory also contains documentation about the
makeheaders preprocessor program:

  11.  [../src/makeheaders.html | makeheaders.html]

Click on the link to read this documentation.  In addition there is
a [http://www.tcl.tk/ | Tcl] script used to build the various makefiles:

  12.  makemake.tcl

Running this Tcl script will automatically regenerate all makefiles.
In order to add a new source file to the Fossil implementation, simply
edit makemake.tcl to add the new filename, then rerun the script, and
all of the makefiles for all targets will be rebuild.
The src/ subdirectory also contains documentation about the
makeheaders preprocessor program:

  11.  [../src/makeheaders.html | makeheaders.html]

Click on the link to read this documentation.  In addition there is
a [http://www.tcl-lang.org/ | Tcl] script used to build the various makefiles:

  12.  makemake.tcl

Running this Tcl script will automatically regenerate all makefiles.
In order to add a new source file to the Fossil implementation, simply
edit makemake.tcl to add the new filename, then rerun the script, and
all of the makefiles for all targets will be rebuild.
Changes to www/mkdownload.tcl.
foreach vers [lsort -decr -real [array names avers]] {
  set hr "/fossil/timeline?c=version-$vers;y=ci"
  puts $out "<tr><td colspan=6 align=left><hr>"
  puts $out "<center><b><a href=\"$hr\">Version $vers</a></b></center>"
  puts $out "</td></tr>"
  puts $out "<tr>"

  foreach {prefix suffix img desc} {
    fossil-linux-x86 zip linux.gif {Linux 3.x x86}
    fossil-macosx-x86 zip mac.gif {Mac 10.x x86}
    fossil-openbsd-x86 zip openbsd.gif {OpenBSD 4.x x86}
    fossil-w32 zip win32.gif {Windows}
    fossil-src tar.gz src.gif {Source Tarball}
  } {
    set filename download/$prefix-$vers.$suffix




    if {[file exists $filename]} {
      set size [file size $filename]
      set units bytes
      if {$size>1024*1024} {
        set size [format %.2f [expr {$size/(1024.0*1024.0)}]]
        set units MiB
      } elseif {$size>1024} {
foreach vers [lsort -decr -real [array names avers]] {
  set hr "/fossil/timeline?c=version-$vers;y=ci"
  puts $out "<tr><td colspan=6 align=left><hr>"
  puts $out "<center><b><a href=\"$hr\">Version $vers</a></b></center>"
  puts $out "</td></tr>"
  puts $out "<tr>"

  foreach {prefix img desc} {
    fossil-linux-x86 linux.gif {Linux 3.x x86}
    fossil-macosx-x86 mac.gif {Mac 10.x x86}
    fossil-openbsd-x86 openbsd.gif {OpenBSD 5.x x86}
    fossil-w32 win32.gif {Windows}
    fossil-src src.gif {Source Tarball}
  } {
    set basename download/$prefix-$vers
    set filename $basename.tar.gz
    if {![file exists $basename.tar.gz]} {
      set filename $basename.zip
    }
    if {[file exists $filename]} {
      set size [file size $filename]
      set units bytes
      if {$size>1024*1024} {
        set size [format %.2f [expr {$size/(1024.0*1024.0)}]]
        set units MiB
      } elseif {$size>1024} {
Changes to www/mkindex.tcl.
#!/bin/sh
#
# Run this TCL script to generate a WIKI page that contains a
# permuted index of the various documentation files.
#
#    tclsh mkindex.tcl >permutedindex.html
#

set doclist {
  adding_code.wiki {Adding New Features To Fossil}
  adding_code.wiki {Hacking Fossil}
  antibot.wiki {Defense against Spiders and Bots}
  bugtheory.wiki {Bug Tracking In Fossil}
  branching.wiki {Branching, Forking, Merging, and Tagging}

  build.wiki {Compiling and Installing Fossil}
  checkin_names.wiki {Check-in And Version Names}
  checkin.wiki {Check-in Checklist}
  changes.wiki {Fossil Changelog}
  copyright-release.html {Contributor License Agreement}
  concepts.wiki {Fossil Core Concepts}
  contribute.wiki {Contributing Code or Documentation To The Fossil Project}
#!/bin/sh
#
# Run this TCL script to generate a WIKI page that contains a
# permuted index of the various documentation files.
#
#    tclsh mkindex.tcl >permutedindex.html
#

set doclist {
  adding_code.wiki {Adding New Features To Fossil}
  adding_code.wiki {Hacking Fossil}
  antibot.wiki {Defense against Spiders and Bots}
  blame.wiki {The Annotate/Blame Algorithm Of Fossil}
  branching.wiki {Branching, Forking, Merging, and Tagging}
  bugtheory.wiki {Bug Tracking In Fossil}
  build.wiki {Compiling and Installing Fossil}
  checkin_names.wiki {Check-in And Version Names}
  checkin.wiki {Check-in Checklist}
  changes.wiki {Fossil Changelog}
  copyright-release.html {Contributor License Agreement}
  concepts.wiki {Fossil Core Concepts}
  contribute.wiki {Contributing Code or Documentation To The Fossil Project}
  sync.wiki {The Fossil Sync Protocol}
  tech_overview.wiki {A Technical Overview Of The Design And Implementation
                      Of Fossil}
  tech_overview.wiki {SQLite Databases Used By Fossil}
  th1.md {The TH1 Scripting Language}
  tickets.wiki {The Fossil Ticket System}
  theory1.wiki {Thoughts On The Design Of The Fossil DVCS}

  webui.wiki {The Fossil Web Interface}
  wikitheory.wiki {Wiki In Fossil}
}

set permindex {}
set stopwords {fossil and a in of on the to are about used by for or}
foreach {file title} $doclist {
  sync.wiki {The Fossil Sync Protocol}
  tech_overview.wiki {A Technical Overview Of The Design And Implementation
                      Of Fossil}
  tech_overview.wiki {SQLite Databases Used By Fossil}
  th1.md {The TH1 Scripting Language}
  tickets.wiki {The Fossil Ticket System}
  theory1.wiki {Thoughts On The Design Of The Fossil DVCS}
  webpage-ex.md {Webpage Examples}
  webui.wiki {The Fossil Web Interface}
  wikitheory.wiki {Wiki In Fossil}
}

set permindex {}
set stopwords {fossil and a in of on the to are about used by for or}
foreach {file title} $doclist {
set permindex [lsort -dict -index 0 $permindex]
set out [open permutedindex.html w]
fconfigure $out -encoding utf-8 -translation lf
puts $out \
"<div class='fossil-doc' data-title='Index Of Fossil Documentation'>"
puts $out {
<center>
<form action='../../../docsrch' method='GET'>
<input type="text" name="s" size="40" autofocus>
<input type="submit" value="Search Docs">
</form>
</center>
<h2>Primary Documents:</h2>
<ul>
<li> <a href='quickstart.wiki'>Quick-start Guide</a>
<li> <a href='faq.wiki'>FAQ</a>
<li> <a href='build.wiki'>Compiling and installing Fossil</a>
<li> <a href='../COPYRIGHT-BSD2.txt'>License</a>
<li> <a href='http://www.fossil-scm.org/schimpf-book/home'>Jim Schimpf's
book</a>
<li> <a href='../../../help'>Command-line help</a>
</ul>
<a name="pindex"></a>
<h2>Permuted Index:</h2>
<ul>}
foreach entry $permindex {
  foreach {title file} $entry break
  puts $out "<li><a href=\"$file\">$title</a></li>"
}
puts $out "</ul></div>"
set permindex [lsort -dict -index 0 $permindex]
set out [open permutedindex.html w]
fconfigure $out -encoding utf-8 -translation lf
puts $out \
"<div class='fossil-doc' data-title='Index Of Fossil Documentation'>"
puts $out {
<center>
<form action='$ROOT/docsrch' method='GET'>
<input type="text" name="s" size="40" autofocus>
<input type="submit" value="Search Docs">
</form>
</center>
<h2>Primary Documents:</h2>
<ul>
<li> <a href='quickstart.wiki'>Quick-start Guide</a>
<li> <a href='faq.wiki'>FAQ</a>
<li> <a href='build.wiki'>Compiling and installing Fossil</a>
<li> <a href='../COPYRIGHT-BSD2.txt'>License</a>
<li> <a href='http://www.fossil-scm.org/schimpf-book/home'>Jim Schimpf's
book</a>
<li> <a href='$ROOT/help'>Command-line help</a>
</ul>
<a name="pindex"></a>
<h2>Permuted Index:</h2>
<ul>}
foreach entry $permindex {
  foreach {title file} $entry break
  puts $out "<li><a href=\"$file\">$title</a></li>"
}
puts $out "</ul></div>"
Changes to www/permutedindex.html.
<div class='fossil-doc' data-title='Index Of Fossil Documentation'>

<center>
<form action='../../../docsrch' method='GET'>
<input type="text" name="s" size="40" autofocus>
<input type="submit" value="Search Docs">
</form>
</center>
<h2>Primary Documents:</h2>
<ul>
<li> <a href='quickstart.wiki'>Quick-start Guide</a>
<li> <a href='faq.wiki'>FAQ</a>
<li> <a href='build.wiki'>Compiling and installing Fossil</a>
<li> <a href='../COPYRIGHT-BSD2.txt'>License</a>
<li> <a href='http://www.fossil-scm.org/schimpf-book/home'>Jim Schimpf's
book</a>
<li> <a href='../../../help'>Command-line help</a>
</ul>
<a name="pindex"></a>
<h2>Permuted Index:</h2>
<ul>
<li><a href="fiveminutes.wiki">5 Minutes as a Single User &mdash; Update and Running in</a></li>
<li><a href="fossil-from-msvc.wiki">2010 IDE &mdash; Integrating Fossil in the Microsoft Express</a></li>
<li><a href="tech_overview.wiki">A Technical Overview Of The Design And Implementation Of Fossil</a></li>
<li><a href="adding_code.wiki">Adding New Features To Fossil</a></li>
<li><a href="antibot.wiki">against Spiders and Bots &mdash; Defense</a></li>
<li><a href="copyright-release.html">Agreement &mdash; Contributor License</a></li>
<li><a href="delta_encoder_algorithm.wiki">Algorithm &mdash; Fossil Delta Encoding</a></li>


<li><a href="customskin.md">Appearance of Web Pages &mdash; Theming: Customizing The</a></li>
<li><a href="fiveminutes.wiki">as a Single User &mdash; Update and Running in 5 Minutes</a></li>
<li><a href="faq.wiki">Asked Questions &mdash; Frequently</a></li>
<li><a href="password.wiki">Authentication &mdash; Password Management And</a></li>
<li><a href="antibot.wiki">Bots &mdash; Defense against Spiders and</a></li>
<li><a href="private.wiki">Branches &mdash; Creating, Syncing, and Deleting Private</a></li>
<li><a href="branching.wiki">Branching, Forking, Merging, and Tagging</a></li>
<div class='fossil-doc' data-title='Index Of Fossil Documentation'>

<center>
<form action='$ROOT/docsrch' method='GET'>
<input type="text" name="s" size="40" autofocus>
<input type="submit" value="Search Docs">
</form>
</center>
<h2>Primary Documents:</h2>
<ul>
<li> <a href='quickstart.wiki'>Quick-start Guide</a>
<li> <a href='faq.wiki'>FAQ</a>
<li> <a href='build.wiki'>Compiling and installing Fossil</a>
<li> <a href='../COPYRIGHT-BSD2.txt'>License</a>
<li> <a href='http://www.fossil-scm.org/schimpf-book/home'>Jim Schimpf's
book</a>
<li> <a href='$ROOT/help'>Command-line help</a>
</ul>
<a name="pindex"></a>
<h2>Permuted Index:</h2>
<ul>
<li><a href="fiveminutes.wiki">5 Minutes as a Single User &mdash; Update and Running in</a></li>
<li><a href="fossil-from-msvc.wiki">2010 IDE &mdash; Integrating Fossil in the Microsoft Express</a></li>
<li><a href="tech_overview.wiki">A Technical Overview Of The Design And Implementation Of Fossil</a></li>
<li><a href="adding_code.wiki">Adding New Features To Fossil</a></li>
<li><a href="antibot.wiki">against Spiders and Bots &mdash; Defense</a></li>
<li><a href="copyright-release.html">Agreement &mdash; Contributor License</a></li>
<li><a href="delta_encoder_algorithm.wiki">Algorithm &mdash; Fossil Delta Encoding</a></li>
<li><a href="blame.wiki">Algorithm Of Fossil &mdash; The Annotate/Blame</a></li>
<li><a href="blame.wiki">Annotate/Blame Algorithm Of Fossil &mdash; The</a></li>
<li><a href="customskin.md">Appearance of Web Pages &mdash; Theming: Customizing The</a></li>
<li><a href="fiveminutes.wiki">as a Single User &mdash; Update and Running in 5 Minutes</a></li>
<li><a href="faq.wiki">Asked Questions &mdash; Frequently</a></li>
<li><a href="password.wiki">Authentication &mdash; Password Management And</a></li>
<li><a href="antibot.wiki">Bots &mdash; Defense against Spiders and</a></li>
<li><a href="private.wiki">Branches &mdash; Creating, Syncing, and Deleting Private</a></li>
<li><a href="branching.wiki">Branching, Forking, Merging, and Tagging</a></li>
<li><a href="embeddeddoc.wiki">Documentation &mdash; Embedded Project</a></li>
<li><a href="contribute.wiki">Documentation To The Fossil Project &mdash; Contributing Code or</a></li>
<li><a href="theory1.wiki">DVCS &mdash; Thoughts On The Design Of The Fossil</a></li>
<li><a href="quotes.wiki">DVCSes in General &mdash; Quotes: What People Are Saying About Fossil, Git, and</a></li>
<li><a href="embeddeddoc.wiki">Embedded Project Documentation</a></li>
<li><a href="delta_encoder_algorithm.wiki">Encoding Algorithm &mdash; Fossil Delta</a></li>
<li><a href="event.wiki">Events</a></li>

<li><a href="inout.wiki">Export To And From Git &mdash; Import And</a></li>
<li><a href="fossil-from-msvc.wiki">Express 2010 IDE &mdash; Integrating Fossil in the Microsoft</a></li>
<li><a href="adding_code.wiki">Features To Fossil &mdash; Adding New</a></li>
<li><a href="fileformat.wiki">File Format &mdash; Fossil</a></li>
<li><a href="branching.wiki">Forking, Merging, and Tagging &mdash; Branching,</a></li>
<li><a href="delta_format.wiki">Format &mdash; Fossil Delta</a></li>
<li><a href="fileformat.wiki">Format &mdash; Fossil File</a></li>
<li><a href="embeddeddoc.wiki">Documentation &mdash; Embedded Project</a></li>
<li><a href="contribute.wiki">Documentation To The Fossil Project &mdash; Contributing Code or</a></li>
<li><a href="theory1.wiki">DVCS &mdash; Thoughts On The Design Of The Fossil</a></li>
<li><a href="quotes.wiki">DVCSes in General &mdash; Quotes: What People Are Saying About Fossil, Git, and</a></li>
<li><a href="embeddeddoc.wiki">Embedded Project Documentation</a></li>
<li><a href="delta_encoder_algorithm.wiki">Encoding Algorithm &mdash; Fossil Delta</a></li>
<li><a href="event.wiki">Events</a></li>
<li><a href="webpage-ex.md">Examples &mdash; Webpage</a></li>
<li><a href="inout.wiki">Export To And From Git &mdash; Import And</a></li>
<li><a href="fossil-from-msvc.wiki">Express 2010 IDE &mdash; Integrating Fossil in the Microsoft</a></li>
<li><a href="adding_code.wiki">Features To Fossil &mdash; Adding New</a></li>
<li><a href="fileformat.wiki">File Format &mdash; Fossil</a></li>
<li><a href="branching.wiki">Forking, Merging, and Tagging &mdash; Branching,</a></li>
<li><a href="delta_format.wiki">Format &mdash; Fossil Delta</a></li>
<li><a href="fileformat.wiki">Format &mdash; Fossil File</a></li>
<li><a href="private.wiki">Syncing, and Deleting Private Branches &mdash; Creating,</a></li>
<li><a href="custom_ticket.wiki">System &mdash; Customizing The Ticket</a></li>
<li><a href="tickets.wiki">System &mdash; The Fossil Ticket</a></li>
<li><a href="branching.wiki">Tagging &mdash; Branching, Forking, Merging, and</a></li>
<li><a href="tech_overview.wiki">Technical Overview Of The Design And Implementation Of Fossil &mdash; A</a></li>
<li><a href="../test/release-checklist.wiki">Testing Checklist &mdash; Pre-Release</a></li>
<li><a href="th1.md">TH1 Scripting Language &mdash; The</a></li>

<li><a href="makefile.wiki">The Fossil Build Process</a></li>
<li><a href="sync.wiki">The Fossil Sync Protocol</a></li>
<li><a href="tickets.wiki">The Fossil Ticket System</a></li>
<li><a href="webui.wiki">The Fossil Web Interface</a></li>
<li><a href="th1.md">The TH1 Scripting Language</a></li>
<li><a href="customskin.md">Theming: Customizing The Appearance of Web Pages</a></li>
<li><a href="customgraph.md">Theming: Customizing the Timeline Graph</a></li>
<li><a href="private.wiki">Syncing, and Deleting Private Branches &mdash; Creating,</a></li>
<li><a href="custom_ticket.wiki">System &mdash; Customizing The Ticket</a></li>
<li><a href="tickets.wiki">System &mdash; The Fossil Ticket</a></li>
<li><a href="branching.wiki">Tagging &mdash; Branching, Forking, Merging, and</a></li>
<li><a href="tech_overview.wiki">Technical Overview Of The Design And Implementation Of Fossil &mdash; A</a></li>
<li><a href="../test/release-checklist.wiki">Testing Checklist &mdash; Pre-Release</a></li>
<li><a href="th1.md">TH1 Scripting Language &mdash; The</a></li>
<li><a href="blame.wiki">The Annotate/Blame Algorithm Of Fossil</a></li>
<li><a href="makefile.wiki">The Fossil Build Process</a></li>
<li><a href="sync.wiki">The Fossil Sync Protocol</a></li>
<li><a href="tickets.wiki">The Fossil Ticket System</a></li>
<li><a href="webui.wiki">The Fossil Web Interface</a></li>
<li><a href="th1.md">The TH1 Scripting Language</a></li>
<li><a href="customskin.md">Theming: Customizing The Appearance of Web Pages</a></li>
<li><a href="customgraph.md">Theming: Customizing the Timeline Graph</a></li>
<li><a href="hints.wiki">Usage Hints &mdash; Fossil Tips And</a></li>
<li><a href="fiveminutes.wiki">User &mdash; Update and Running in 5 Minutes as a Single</a></li>
<li><a href="ssl.wiki">Using SSL with Fossil</a></li>
<li><a href="checkin_names.wiki">Version Names &mdash; Check-in And</a></li>
<li><a href="fossil-v-git.wiki">Versus Git &mdash; Fossil</a></li>
<li><a href="webui.wiki">Web Interface &mdash; The Fossil</a></li>
<li><a href="customskin.md">Web Pages &mdash; Theming: Customizing The Appearance of</a></li>

<li><a href="quotes.wiki">What People Are Saying About Fossil, Git, and DVCSes in General &mdash; Quotes:</a></li>
<li><a href="wikitheory.wiki">Wiki In Fossil</a></li>
<li><a href="ssl.wiki">with Fossil &mdash; Using SSL</a></li>
</ul></div>
<li><a href="hints.wiki">Usage Hints &mdash; Fossil Tips And</a></li>
<li><a href="fiveminutes.wiki">User &mdash; Update and Running in 5 Minutes as a Single</a></li>
<li><a href="ssl.wiki">Using SSL with Fossil</a></li>
<li><a href="checkin_names.wiki">Version Names &mdash; Check-in And</a></li>
<li><a href="fossil-v-git.wiki">Versus Git &mdash; Fossil</a></li>
<li><a href="webui.wiki">Web Interface &mdash; The Fossil</a></li>
<li><a href="customskin.md">Web Pages &mdash; Theming: Customizing The Appearance of</a></li>
<li><a href="webpage-ex.md">Webpage Examples</a></li>
<li><a href="quotes.wiki">What People Are Saying About Fossil, Git, and DVCSes in General &mdash; Quotes:</a></li>
<li><a href="wikitheory.wiki">Wiki In Fossil</a></li>
<li><a href="ssl.wiki">with Fossil &mdash; Using SSL</a></li>
</ul></div>
Changes to www/quickstart.wiki.
    
    <p>Clone a remote repository as follows: ([/help/clone | more info])</p>
    
    <blockquote>
    <b>fossil clone</b> <i>URL  repository-filename</i>
    </blockquote>
    
    <p>The <i>URL</i> above is the http URL for the fossil repository
    you want to clone, and it may include a "user:password" part, e.g.
    <tt>http://user:password@www.fossil-scm.org/fossil</tt>. You can
    call the new repository anything you want - there are no naming
    restrictions.  As an example, you can clone the fossil repository
    this way:</p>
    
    <blockquote>
    <b>fossil clone http://www.fossil-scm.org/ myclone.fossil</b>
    </blockquote>



    <p>The new local copy of the repository is stored in a single file,

    which in the example above is named "myclone.fossil".

    You can name your repositories anything you want.  The ".fossil" suffix
    is not required.</p>




    <p>Note: If you are behind a restrictive firewall, you might need
    to <a href="#proxy">specify an HTTP proxy</a>.</p>

    <p>A Fossil repository is a single disk file.  Instead of cloning,
    you can just make a copy of the repository file (for example, using
    "scp").  Note, however, that the repository file contains auxiliary
    information above and beyond the versioned files, including some
    sensitive information such as password hashes and email addresses.  If you
    
    <p>Clone a remote repository as follows: ([/help/clone | more info])</p>
    
    <blockquote>
    <b>fossil clone</b> <i>URL  repository-filename</i>
    </blockquote>
    
    <p>The <i>URL</i> specifies the fossil repository
    you want to clone.  The <i>repository-filename</i> is the new local

    filename into which the cloned repository will be written.  For
    example:

    
    <blockquote>
    <b>fossil clone http://www.fossil-scm.org/ myclone.fossil</b>
    </blockquote>

    <p>If the remote repository requires a login, include a 
    userid in the URL like this:

    <blockquote>
    <b>fossil clone http://</b><i>userid</i><b>@www.fossil-scm.org/ myclone.fossil</b>
    </blockquote>
 

    <p>You will be prompted separately for the password.
     Use "%HH" escapes for special characters in the userid. 
     Examples: "%40" in place of "@" and "%2F" in place of "/".

    <p>If you are behind a restrictive firewall, you might need
    to <a href="#proxy">specify an HTTP proxy</a>.</p>

    <p>A Fossil repository is a single disk file.  Instead of cloning,
    you can just make a copy of the repository file (for example, using
    "scp").  Note, however, that the repository file contains auxiliary
    information above and beyond the versioned files, including some
    sensitive information such as password hashes and email addresses.  If you
Changes to www/quotes.wiki.
<li>If programmers _really_ wanted to help scientists, they'd build a version control
system that was more usable than Git.

<blockquote>
<i>Tweet by Greg Wilson @gvwilson on 2015-02-22 17:47</i>
</blockquote>





</ol>

<h2>On The Usability Of Fossil:</h2>

<ol>
<li value=10>
Fossil mesmerizes me with simplicity especially after I struggled to
get a bug-tracking system to work with mercurial.

<blockquote>
<i>rawjeev at [http://stackoverflow.com/questions/156322/what-do-people-think-of-the-fossil-dvcs]</i>
</blockquote>
<li>If programmers _really_ wanted to help scientists, they'd build a version control
system that was more usable than Git.

<blockquote>
<i>Tweet by Greg Wilson @gvwilson on 2015-02-22 17:47</i>
</blockquote>

<li><img src='xkcd-git.gif' align='top'>

<blockquote><i>Randall Munroe.  [http://xkcd.com/1597/]</i></blockquote>

</ol>

<h2>On The Usability Of Fossil:</h2>

<ol>
<li value=11>
Fossil mesmerizes me with simplicity especially after I struggled to
get a bug-tracking system to work with mercurial.

<blockquote>
<i>rawjeev at [http://stackoverflow.com/questions/156322/what-do-people-think-of-the-fossil-dvcs]</i>
</blockquote>

</ol>


<h2>On Git Versus Fossil</h2>

<ol>
<li value=14>
Just want to say thanks for fossil making my life easier.... 
Also <nowiki>[for]</nowiki> not having a misanthropic command line interface.

<blockquote>
<i>Joshua Paine at [http://www.mail-archive.com/fossil-users@lists.fossil-scm.org/msg02736.html]</i>
</blockquote>

</ol>


<h2>On Git Versus Fossil</h2>

<ol>
<li value=15>
Just want to say thanks for fossil making my life easier.... 
Also <nowiki>[for]</nowiki> not having a misanthropic command line interface.

<blockquote>
<i>Joshua Paine at [http://www.mail-archive.com/fossil-users@lists.fossil-scm.org/msg02736.html]</i>
</blockquote>

Changes to www/server.wiki.
<title>How To Configure A Fossil Server</title>
<h2>Introduction</h2><blockquote>
<p>A server is not necessary to use Fossil, but a server does help in collaborating with
peers.  A Fossil server also works well as a complete website for a project.
For example, the complete <b>http://www.fossil-scm.org/</b> website, including the
page you are now reading (but excepting the download page),

is just a Fossil server displaying the content of the 
self-hosting repository for Fossil.</p>
<p>This article is a guide for setting up your own Fossil server.</p></blockquote>
<h2>Overview</h2><blockquote>
There are basically four ways to set up a Fossil server:
<ol>
<li>A stand-alone server
<title>How To Configure A Fossil Server</title>
<h2>Introduction</h2><blockquote>
<p>A server is not necessary to use Fossil, but a server does help in collaborating with
peers.  A Fossil server also works well as a complete website for a project.
For example, the complete [https://www.fossil-scm.org/] website, including the
page you are now reading (but excepting the 
[https://www.fossil-scm.org/download.html|download page]),
is just a Fossil server displaying the content of the 
self-hosting repository for Fossil.</p>
<p>This article is a guide for setting up your own Fossil server.</p></blockquote>
<h2>Overview</h2><blockquote>
There are basically four ways to set up a Fossil server:
<ol>
<li>A stand-alone server
systems that support the "getloadavg()" API.  Most modern Unix systems have
this interface, but Windows does not, so the feature will not work on Windows.
Note also that Linux implements "getloadavg()" by accessing the "/proc/loadavg"
file in the "proc" virtual filesystem.  If you are running a Fossil instance
inside a chroot() jail on Linux, you will need to make the "/proc" file
system available inside that jail in order for this feature to work.  On
the self-hosting Fossil repository, this was accomplished by adding a line
to the "/etc/mtab" file that looks like:
<blockquote><pre>
chroot_jail_proc /home/www/proc proc r 0 0
</pre></blockquote>
Pathnames should be adjusted for individual systems, of course.
<p>
To see if the load-average limiter is functional, visit the [/test_env] page
of the server to view the current load average.  If the value for the load
systems that support the "getloadavg()" API.  Most modern Unix systems have
this interface, but Windows does not, so the feature will not work on Windows.
Note also that Linux implements "getloadavg()" by accessing the "/proc/loadavg"
file in the "proc" virtual filesystem.  If you are running a Fossil instance
inside a chroot() jail on Linux, you will need to make the "/proc" file
system available inside that jail in order for this feature to work.  On
the self-hosting Fossil repository, this was accomplished by adding a line
to the "/etc/mtab" or "/etc/fstab" file that looks like:
<blockquote><pre>
chroot_jail_proc /home/www/proc proc r 0 0
</pre></blockquote>
Pathnames should be adjusted for individual systems, of course.
<p>
To see if the load-average limiter is functional, visit the [/test_env] page
of the server to view the current load average.  If the value for the load
Changes to www/stats.wiki.
<td>3.4&nbsp;GB
<td>45.5&nbsp;MB
<td>73:1
<td>29.9&nbsp;MB
</tr>

<tr align="center">
<td>[http://core.tcl.tk/tcl/timeline | TCL]
<td>139662
<td>18125
<td>6183&nbsp;days<br>16.93&nbsp;years
<td>6.6&nbsp;GB
<td>192.6&nbsp;MB
<td>34:1
<td>117.1&nbsp;MB
<td>3.4&nbsp;GB
<td>45.5&nbsp;MB
<td>73:1
<td>29.9&nbsp;MB
</tr>

<tr align="center">
<td>[http://core.tcl-lang.org/tcl/timeline | TCL]
<td>139662
<td>18125
<td>6183&nbsp;days<br>16.93&nbsp;years
<td>6.6&nbsp;GB
<td>192.6&nbsp;MB
<td>34:1
<td>117.1&nbsp;MB
Changes to www/style.wiki.
<title>Coding Style</title>

Fossil source code should following the style guidelines below.

<b>General points:</b>:

  10.  No line of code exceeds 80 characters in length.  (Occasional
       exceptions are made for HTML text on @-lines.)

  11.  There are no tab characters.

  12.  Line terminators are \n only.  Do not use a \r\n line terminator.
<title>Coding Style</title>

Fossil source code should following the style guidelines below.

<b>General points</b>:

  10.  No line of code exceeds 80 characters in length.  (Occasional
       exceptions are made for HTML text on @-lines.)

  11.  There are no tab characters.

  12.  Line terminators are \n only.  Do not use a \r\n line terminator.
  <li>  The check-list items for functions also apply to major subsections
     within a function.

  <li>  All code subblocks are enclosed in {...}.


  <li> <b>assert() macros are used as follows </b>:
    <ol type="a">

  <li>  Function preconditions are clearly stated and verified by asserts.

  <li>  Invariants are identified by asserts.
    </ol>


  <li>  The check-list items for functions also apply to major subsections
     within a function.

  <li>  All code subblocks are enclosed in {...}.


  <li> <b>assert() macros are used as follows</b>:
    <ol type="a">

  <li>  Function preconditions are clearly stated and verified by asserts.

  <li>  Invariants are identified by asserts.
    </ol>

Changes to www/th1.md.
----------------------------

The original Tcl language after when TH1 is modeled has a very rich
repertoire of commands.  TH1, as it is designed to be minimalist and
embedded has a greatly reduced command set.  The following bullets
summarize the commands available in TH1:



  *  break
  *  catch SCRIPT ?VARIABLE?
  *  continue
  *  error ?STRING?
  *  expr EXPR
  *  for INIT-SCRIPT TEST-EXPR NEXT-SCRIPT BODY-SCRIPT
  *  if EXPR SCRIPT (elseif EXPR SCRIPT)* ?else SCRIPT?
----------------------------

The original Tcl language after when TH1 is modeled has a very rich
repertoire of commands.  TH1, as it is designed to be minimalist and
embedded has a greatly reduced command set.  The following bullets
summarize the commands available in TH1:

  *  array exists VARNAME
  *  array names VARNAME
  *  break
  *  catch SCRIPT ?VARIABLE?
  *  continue
  *  error ?STRING?
  *  expr EXPR
  *  for INIT-SCRIPT TEST-EXPR NEXT-SCRIPT BODY-SCRIPT
  *  if EXPR SCRIPT (elseif EXPR SCRIPT)* ?else SCRIPT?
  *  string length STRING
  *  string range STRING FIRST LAST
  *  string repeat STRING COUNT
  *  unset VARNAME
  *  uplevel ?LEVEL? SCRIPT
  *  upvar ?FRAME? OTHERVAR MYVAR ?OTHERVAR MYVAR?

All of the above commands works as in the original Tcl.  Refer to the

Tcl documentation for details.








TH1 Extended Commands
---------------------

There are many new commands added to TH1 and used to access the special
features of Fossil.  The following is a summary of the extended commands:

  *  anoncap
  *  anycap
  *  artifact
  *  checkout
  *  combobox
  *  date
  *  decorate

  *  enable_output

  *  getParameter
  *  glob_match
  *  globalState
  *  hascap
  *  hasfeature
  *  html
  *  htmlize
  *  http
  *  httpize

  *  linecount
  *  markdown
  *  puts
  *  query
  *  randhex

  *  regexp
  *  reinitialize
  *  render
  *  repository
  *  searchable
  *  setParameter
  *  setting
  *  styleHeader
  *  styleFooter
  *  tclEval
  *  tclExpr
  *  tclInvoke
  *  tclIsSafe
  *  tclMakeSafe
  *  tclReady
  *  trace
  *  stime
  *  utime

  *  wiki

Each of the commands above is documented by a block comment above their
implementation in the th\_main.c or th\_tcl.c source files.

All commands starting with "tcl", with the exception of "tclReady",
require the Tcl integration subsystem be included at compile-time.

  *  string length STRING
  *  string range STRING FIRST LAST
  *  string repeat STRING COUNT
  *  unset VARNAME
  *  uplevel ?LEVEL? SCRIPT
  *  upvar ?FRAME? OTHERVAR MYVAR ?OTHERVAR MYVAR?

All of the above commands work as in the original Tcl.  Refer to the
<a href="https://www.tcl-lang.org/man/tcl/contents.htm">Tcl documentation</a>
for details.

Summary of Core TH1 Variables
-----------------------------

  *  tcl\_platform(engine) -- _This will always have the value "TH1"._
  *  tcl\_platform(platform) -- _This will have the value "windows" or "unix"._
  *  th\_stack\_trace -- _This will contain error stack information._

TH1 Extended Commands
---------------------

There are many new commands added to TH1 and used to access the special
features of Fossil.  The following is a summary of the extended commands:

  *  anoncap
  *  anycap
  *  artifact
  *  checkout
  *  combobox
  *  date
  *  decorate
  *  dir
  *  enable\_output
  *  encode64
  *  getParameter
  *  glob\_match
  *  globalState
  *  hascap
  *  hasfeature
  *  html
  *  htmlize
  *  http
  *  httpize
  *  insertCsrf
  *  linecount
  *  markdown
  *  puts
  *  query
  *  randhex
  *  redirect
  *  regexp
  *  reinitialize
  *  render
  *  repository
  *  searchable
  *  setParameter
  *  setting
  *  styleHeader
  *  styleFooter
  *  tclEval
  *  tclExpr
  *  tclInvoke
  *  tclIsSafe
  *  tclMakeSafe
  *  tclReady
  *  trace
  *  stime
  *  utime
  *  verifyCsrf
  *  wiki

Each of the commands above is documented by a block comment above their
implementation in the th\_main.c or th\_tcl.c source files.

All commands starting with "tcl", with the exception of "tclReady",
require the Tcl integration subsystem be included at compile-time.
-------------------------------------------

  *  decorate STRING

Renders STRING as wiki content; however, only links are handled.  No
other markup is processed.













<a name="enable_output"></a>TH1 enable_output Command
-----------------------------------------------------

  *  enable_output BOOLEAN

Enable or disable sending output when the combobox, puts, or wiki
commands are used.








<a name="getParameter"></a>TH1 getParameter Command
---------------------------------------------------

  *  getParameter NAME ?DEFAULT?

Returns the value of the specified query parameter or the specified
default value when there is no matching query parameter.

<a name="glob_match"></a>TH1 glob_match Command
-----------------------------------------------

  *  glob_match ?-one? ?--? patternList string

Checks the string against the specified glob pattern -OR- list of glob
patterns and returns non-zero if there is a match.

<a name="globalState"></a>TH1 globalState Command
-------------------------------------------------


-------------------------------------------

  *  decorate STRING

Renders STRING as wiki content; however, only links are handled.  No
other markup is processed.

<a name="dir"></a>TH1 dir Command
-------------------------------------------

  * dir CHECKIN ?GLOB? ?DETAILS?

Returns a list containing all files in CHECKIN. If GLOB is given only
the files matching the pattern GLOB within CHECKIN will be returned.
If DETAILS is non-zero, the result will be a list-of-lists, with each
element containing at least three elements: the file name, the file
size (in bytes), and the file last modification time (relative to the
time zone configured for the repository).

<a name="enable_output"></a>TH1 enable\_output Command
------------------------------------------------------

  *  enable\_output BOOLEAN

Enable or disable sending output when the combobox, puts, or wiki
commands are used.

<a name="encode64"></a>TH1 encode64 Command
-------------------------------------------

  *  encode64 STRING

Encode the specified string using Base64 and return the result.

<a name="getParameter"></a>TH1 getParameter Command
---------------------------------------------------

  *  getParameter NAME ?DEFAULT?

Returns the value of the specified query parameter or the specified
default value when there is no matching query parameter.

<a name="glob_match"></a>TH1 glob\_match Command
------------------------------------------------

  *  glob\_match ?-one? ?--? patternList string

Checks the string against the specified glob pattern -OR- list of glob
patterns and returns non-zero if there is a match.

<a name="globalState"></a>TH1 globalState Command
-------------------------------------------------

  *  hasfeature STRING

Returns true if the binary has the given compile-time feature enabled.
The possible features are:

  1. **ssl** -- _Support for the HTTPS transport._
  1. **legacyMvRm** -- _Support for legacy mv/rm command behavior._

  1. **th1Docs** -- _Support for TH1 in embedded documentation._
  1. **th1Hooks** -- _Support for TH1 command and web page hooks._
  1. **tcl** -- _Support for Tcl integration._
  1. **useTclStubs** -- _Tcl stubs enabled in the Tcl headers._
  1. **tclStubs** -- _Uses Tcl stubs (i.e. linking with stubs library)._
  1. **tclPrivateStubs** -- _Uses Tcl private stubs (i.e. header-only)._
  1. **json** -- _Support for the JSON APIs._
  1. **markdown** -- _Support for Markdown documentation format._
  1. **unicodeCmdLine** -- _The command line arguments are Unicode._
  1. **dynamicBuild** -- _Dynamically linked to libraries._




<a name="html"></a>TH1 html Command
-----------------------------------

  *  html STRING

Outputs the STRING escaped for HTML.
  *  hasfeature STRING

Returns true if the binary has the given compile-time feature enabled.
The possible features are:

  1. **ssl** -- _Support for the HTTPS transport._
  1. **legacyMvRm** -- _Support for legacy mv/rm command behavior._
  1. **execRelPaths** -- _Use relative paths with external diff/gdiff._
  1. **th1Docs** -- _Support for TH1 in embedded documentation._
  1. **th1Hooks** -- _Support for TH1 command and web page hooks._
  1. **tcl** -- _Support for Tcl integration._
  1. **useTclStubs** -- _Tcl stubs enabled in the Tcl headers._
  1. **tclStubs** -- _Uses Tcl stubs (i.e. linking with stubs library)._
  1. **tclPrivateStubs** -- _Uses Tcl private stubs (i.e. header-only)._
  1. **json** -- _Support for the JSON APIs._
  1. **markdown** -- _Support for Markdown documentation format._
  1. **unicodeCmdLine** -- _The command line arguments are Unicode._
  1. **dynamicBuild** -- _Dynamically linked to libraries._

Specifying an unknown feature will return a value of false, it will not
raise a script error.

<a name="html"></a>TH1 html Command
-----------------------------------

  *  html STRING

Outputs the STRING escaped for HTML.
<a name="httpize"></a>TH1 httpize Command
-----------------------------------------

  *  httpize STRING

Escape all characters of STRING which have special meaning in URI
components.  Returns the escaped string.









<a name="linecount"></a>TH1 linecount Command
---------------------------------------------

  *  linecount STRING MAX MIN

Returns one more than the number of \n characters in STRING.  But
<a name="httpize"></a>TH1 httpize Command
-----------------------------------------

  *  httpize STRING

Escape all characters of STRING which have special meaning in URI
components.  Returns the escaped string.

<a name="insertCsrf"></a>TH1 insertCsrf Command
-----------------------------------------------

  *  insertCsrf

While rendering a form, call this command to add the Anti-CSRF token
as a hidden element of the form.

<a name="linecount"></a>TH1 linecount Command
---------------------------------------------

  *  linecount STRING MAX MIN

Returns one more than the number of \n characters in STRING.  But
<a name="randhex"></a>TH1 randhex Command
-----------------------------------------

  *  randhex N

Returns a string of N*2 random hexadecimal digits with N<50.  If N is
omitted, use a value of 10.









<a name="regexp"></a>TH1 regexp Command
---------------------------------------

  *  regexp ?-nocase? ?--? exp string

Checks the string against the specified regular expression and returns
<a name="randhex"></a>TH1 randhex Command
-----------------------------------------

  *  randhex N

Returns a string of N*2 random hexadecimal digits with N<50.  If N is
omitted, use a value of 10.

<a name="redirect"></a>TH1 redirect Command
-------------------------------------------

  *  redirect URL

Issues an HTTP redirect (302) to the specified URL and then exits the
process.

<a name="regexp"></a>TH1 regexp Command
---------------------------------------

  *  regexp ?-nocase? ?--? exp string

Checks the string against the specified regular expression and returns
<a name="utime"></a>TH1 utime Command
-------------------------------------

  *  utime

Returns the number of microseconds of CPU time consumed by the current
process in user space.












<a name="wiki"></a>TH1 wiki Command
-----------------------------------

  *  wiki STRING

Renders STRING as wiki content.
<a name="utime"></a>TH1 utime Command
-------------------------------------

  *  utime

Returns the number of microseconds of CPU time consumed by the current
process in user space.

<a name="verifyCsrf"></a>TH1 verifyCsrf Command
-----------------------------------------------

  *  verifyCsrf

Before using the results of a form, first call this command to verify
that this Anti-CSRF token is present and is valid.  If the Anti-CSRF token
is missing or is incorrect, that indicates a cross-site scripting attack.
If the event of an attack is detected, an error message is generated and
all further processing is aborted.

<a name="wiki"></a>TH1 wiki Command
-----------------------------------

  *  wiki STRING

Renders STRING as wiki content.
**This command requires the Tcl integration feature.**

  *  th1Expr arg

Evaluates the TH1 expression and returns its result verbatim.  If a TH1
script error is generated, it will be transformed into a Tcl script error.

Further Notes
-------------

**To Do:** We would like to have a community volunteer go through and
copy the documentation for each of these commands (with appropriate
format changes and spelling and grammar corrections) into subsequent
sections of this document. It is suggested that the list of extension
commands be left intact - as a quick reference.  But it would be really
nice to also have the details of what each command does.
**This command requires the Tcl integration feature.**

  *  th1Expr arg

Evaluates the TH1 expression and returns its result verbatim.  If a TH1
script error is generated, it will be transformed into a Tcl script error.

Changes to www/uitest.html.
<html>
<head>
<title>Fossil UI Test</title>
</head>
<body>
<script>
  var aTest = [
///////////////////////////////////////////////////////////////////////////
///  Add pages to be tested below:
//////////////////////////////////////////////////////////////////////////
{
 url: "timeline",
 desc:
   "Simple timeline of most recent check-ins. Verify that all submenus work."
},
{
 url: "timeline?n=125",
 desc:
   "Timeline with 125 entries.  Verify that submenus preserve the entry count."
},
{
 url: "wiki",
 desc:
   "The wiki homepage"
}
//////////////////////////////////////////////////////////////////////////////
///  End of testing data
/////////////////////////////////////////////////////////////////////////////
  ];
  var iTest = 0;
  xprev.hidden = 1;
  xnext.hidden = 1;
  xpass.hidden = 1;
  xstart.hidden = 0;
  xstart.href = baseURI + aTest[0].url;
  function startTest(){
    setTimeout(loadPage,1);
  }
  function prevTest(){
    if( iTest<=0 ) return false;
    iTest--;
    setTimeout(loadPage,1);
  }
  function nextTest(){
    if( iTest+1>=nTest ) return false;
Changes to www/webpage-ex.md.
Web-Page Examples
=================

Here are a few examples of the many web pages supported
by Fossil.  This is not an exhaustive list.
Explore hyperlinks to see more.
<style>
.exbtn {
  border: 1px solid #000;
  margin: 1ex;
  border-radius: 1ex;
  padding: 0 1ex;
  background-color: #eee;
}
</style>

  *  <a target='_blank' class='exbtn'
     href='../../../timeline?y=ci&n=100'>Example</a>
     100 most recent check-ins.

  *  <a target='_blank' class='exbtn'
     href='../../../finfo?name=src/file.c'>Example</a>
     All changes to the <b>src/file.c</b> source file.

  *  <a target='_blank' class='exbtn'
     href='../../../timeline?n=200&uf=0c3c2d086a'>Example</a>
     All check-ins using a particular version of the <b>src/file.c</b>
     source file.

  *  <a target='_blank' class='exbtn'
     href='../../../timeline?n=11&y=ci&c=2014-01-01'>Example</a>
     Check-ins proximate to an historical point in time (2014-01-01).

  *  <a target='_blank' class='exbtn'
     href='../../../timeline?n=11&y=ci&c=2014-01-01&v=1'>Example</a>
     The previous augmented with file changes.

  *  <a target='_blank' class='exbtn'
     href='../../../timeline?n=25&y=ci&a=1970-01-01'>Example</a>
     First 25 check-ins after 1970-01-01.  (The first 25 check-ins of
     the project.)

  *  <a target='_blank' class='exbtn'
     href='../../../timeline?n=200&r=svn-import'>Example</a>
     All check-ins of the "svn-import" branch together with check-ins
     that merge with that branch.

  *  <a target='_blank' class='exbtn'
     href='../../../timeline?n=200&t=svn-import'>Example</a>
     All check-ins of the "svn-import" branch only.

  *  <a target='_blank' class='exbtn'
     href='../../../timeline?n=100&y=ci&ubg'>Example</a>
     100 most recent check-ins color coded by committer.

  *  <a target='_blank' class='exbtn'
     href='../../../timeline?from=version-1.27&to=version-1.28'>Example</a>
     All check-ins on the most direct path from 
     version-1.27 to version-1.28

     (Hint:  In any graph above, click the square node boxes
     for two check-ins or files to see a diff.)

  *  <a target='_blank' class='exbtn'
     href='../../../tree?ci=daff9d20621&type=tree'>Example</a>
     All files for a particular check-in (daff9d20621480)

  *  <a target='_blank' class='exbtn'
     href='../../../tree?ci=trunk&type=tree&mtime=1'>Example</a>
     All files for the latest check-in on a branch (trunk) sorted by
     last modification time.

  *  <a target='_blank' class='exbtn'
     href='../../../fileage?name=svn-import'>Example</a>
     Age of all files in the latest checking for branch "svn-import".

  *  <a target='_blank' class='exbtn'
     href='../../../brlist'>Example</a>
     Table of branches.  (Click on column headers to sort.)

  *  <a target='_blank' class='exbtn'
     href='../../../stat'>Example</a>
     Overall repository status.

  *  <a target='_blank' class='exbtn'
     href='../../../reports?view=byfile'>Example</a>
     Number of check-ins for each source file.
     (Click on column headers to sort.)

  *  <a target='_blank' class='exbtn'
     href='../../../blame?checkin=5260fbf63287&filename=src/rss.c&limit=-1'>
       Example</a>
     Most recent change to each line of a particular source file in a 
     particular check-in.

  *  <a target='_blank' class='exbtn'
     href='../../../taglist'>Example</a>
     List of tags on check-ins.

Web-Page Examples
=================

Here are just a few examples of the many web pages supported
by Fossil.  Follow the hyperlinks on the examples below to see
many more.
<style>
.exbtn {
  border: 1px solid #000;
  margin: 1ex;
  border-radius: 1ex;
  padding: 0 1ex;
  background-color: #eee;
}
</style>

  *  <a target='_blank' class='exbtn'
     href='$ROOT/timeline?y=ci&n=100'>Example</a>
     100 most recent check-ins.

  *  <a target='_blank' class='exbtn'
     href='$ROOT/finfo?name=src/file.c'>Example</a>
     All changes to the <b>src/file.c</b> source file.

  *  <a target='_blank' class='exbtn'
     href='$ROOT/timeline?n=200&uf=0c3c2d086a'>Example</a>
     All check-ins using a particular version of the <b>src/file.c</b>
     source file.

  *  <a target='_blank' class='exbtn'
     href='$ROOT/timeline?n=11&y=ci&c=2014-01-01'>Example</a>
     Check-ins proximate to an historical point in time (2014-01-01).

  *  <a target='_blank' class='exbtn'
     href='$ROOT/timeline?n=11&y=ci&c=2014-01-01&v=1'>Example</a>
     The previous example augmented with file changes.

  *  <a target='_blank' class='exbtn'
     href='$ROOT/timeline?n=25&y=ci&a=1970-01-01'>Example</a>
     First 25 check-ins after 1970-01-01.  (The first 25 check-ins of
     the project.)

  *  <a target='_blank' class='exbtn'
     href='$ROOT/timeline?n=200&r=svn-import'>Example</a>
     All check-ins of the "svn-import" branch together with check-ins
     that merge with that branch.

  *  <a target='_blank' class='exbtn'
     href='$ROOT/timeline?n=200&t=svn-import'>Example</a>
     All check-ins of the "svn-import" branch only.

  *  <a target='_blank' class='exbtn'
     href='$ROOT/timeline?n=100&y=ci&ubg'>Example</a>
     100 most recent check-ins color coded by committer rather than by branch.

  *  <a target='_blank' class='exbtn'
     href='$ROOT/timeline?from=version-1.27&to=version-1.28'>Example</a>
     All check-ins on the most direct path from
     version-1.27 to version-1.28

  *  <a target='_blank' class='exbtn'
     href='$ROOT/timeline?namechng'>Example</a>
     Show check-ins that contain file name changes.

  *  <a target='_blank' class='exbtn'
     href='$ROOT/timeline?u=drh&c=2014-01-08&y=ci'>Example</a>
     Show check-ins circa 2014-01-08 by user "drh".

     <big><b>&rarr;</b></big> (Hint:  In the pages above, click the graph nodes
     for any two check-ins or files to see a diff.)
     <big><b>&larr;</b></big>

  *  <a target='_blank' class='exbtn'
     href='$ROOT/search?s=interesting+pages'>Example</a>
     Full-text search for "interesting pages".

  *  <a target='_blank' class='exbtn'
     href='$ROOT/tree?ci=daff9d20621&type=tree'>Example</a>
     All files for a particular check-in (daff9d20621480)

  *  <a target='_blank' class='exbtn'
     href='$ROOT/tree?ci=trunk&type=tree&mtime=1'>Example</a>
     All files for the latest check-in on a branch (trunk) sorted by
     last modification time.

  *  <a target='_blank' class='exbtn'
     href='$ROOT/fileage?name=svn-import'>Example</a>
     Age of all files in the latest check-in for branch "svn-import".

  *  <a target='_blank' class='exbtn'
     href='$ROOT/brlist'>Example</a>
     Table of branches.  (Click on column headers to sort.)

  *  <a target='_blank' class='exbtn'
     href='$ROOT/stat'>Example</a>
     Overall repository status.

  *  <a target='_blank' class='exbtn'
     href='$ROOT/reports?type=ci&view=byuser'>Example</a>
     Number of check-ins per committer.

  *  <a target='_blank' class='exbtn'
     href='$ROOT/reports?view=byfile'>Example</a>
     Number of check-ins for each source file.
     (Click on column headers to sort.)

  *  <a target='_blank' class='exbtn'
     href='$ROOT/blame?checkin=5260fbf63287&filename=src/rss.c&limit=-1'>
       Example</a>
     Most recent change to each line of a particular source file in a
     particular check-in.

  *  <a target='_blank' class='exbtn'
     href='$ROOT/taglist'>Example</a>
     List of tags on check-ins.

  *  <a target='_blank' class='exbtn'
     href='$ROOT/bigbloblist'>Example</a>
     The largest objects in the repository.

  *  <a target='_blank' class='exbtn'
     href='$ROOT/hash-collisions'>Example</a>
     SHA1 prefix collisions.
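All of the examples above follow the same pattern: a page name under the
repository root plus a handful of query parameters.  Purely as an
illustration (not part of this check-in; the baseURI value and the chosen
parameters are assumptions), the same URLs can be assembled programmatically,
e.g. to feed a harness like the UI test page earlier in this check-in:

    // Assumed repository root, standing in for the $ROOT prefix used above.
    var baseURI = "https://www.fossil-scm.org/index.html/";

    // Each entry mirrors one bullet above: a page plus its query parameters.
    var examples = [
      { page: "timeline", args: { y: "ci", n: 100 } },       // newest check-ins
      { page: "finfo",    args: { name: "src/file.c" } },    // history of one file
      { page: "tree",     args: { ci: "trunk", type: "tree", mtime: 1 } }
    ];

    // Turn { y: "ci", n: 100 } into "y=ci&n=100".
    function queryString(args){
      var parts = [];
      for( var k in args ){
        parts.push(encodeURIComponent(k) + "=" + encodeURIComponent(args[k]));
      }
      return parts.join("&");
    }

    examples.forEach(function(ex){
      console.log(baseURI + ex.page + "?" + queryString(ex.args));
    });
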
Added www/xkcd-git.gif.

cannot compute difference between binary files