Overview
Comment:      Merge updates from trunk.
Downloads:    Tarball | ZIP archive
Timelines:    family | ancestors | descendants | both | enhancedUndo
Files:        files | file ages | folders
SHA1:         a02d847edb8c2d1e422fdc999d41fafc
User & Date:  mistachkin 2015-07-08 17:31:45.312
Context
2015-07-11
  07:39  Merge updates from trunk. ... (Closed-Leaf check-in: 0e4247f9 user: mistachkin tags: enhancedUndo)
2015-07-10
  09:47  Merge trunk. Add built objects to "ignore-glob" for testing purposes ... (Closed-Leaf check-in: 2821e284 user: jan.nijtmans tags: enhancedUndo-test)
2015-07-08
  17:31  Merge updates from trunk. ... (check-in: a02d847e user: mistachkin tags: enhancedUndo)
  17:16  Port the fix from the previous check-in to the makemake Tcl script as well. ... (check-in: b9447b0e user: mistachkin tags: trunk)
2015-07-06
  21:56  Small grammar fix in comment. ... (check-in: 45c7dbb8 user: mistachkin tags: enhancedUndo)
Changes
Changes to src/makemake.tcl.
︙
New lines 1322-1336 (the FOSSIL_BUILD_ZLIB default is the modified line):
# Build the OpenSSL libraries?
!ifndef FOSSIL_BUILD_SSL
FOSSIL_BUILD_SSL = 0
!endif

# Build the included zlib library?
!ifndef FOSSIL_BUILD_ZLIB
FOSSIL_BUILD_ZLIB = 1
!endif

# Link everything except SQLite dynamically?
!ifndef FOSSIL_DYNAMIC_BUILD
FOSSIL_DYNAMIC_BUILD = 0
!endif
︙
Changes to src/sqlite3.c.
︙
New lines 323-337 (SQLITE_SOURCE_ID is the modified line):
**
** See also: [sqlite3_libversion()],
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
#define SQLITE_VERSION        "3.8.11"
#define SQLITE_VERSION_NUMBER 3008011
#define SQLITE_SOURCE_ID      "2015-07-08 16:22:42 5348ffc3fda5168c1e9e14aa88b0c6aedbda7c94"

/*
** CAPI3REF: Run-Time Library Version Numbers
** KEYWORDS: sqlite3_version, sqlite3_sourceid
**
** These interfaces provide the same information as the [SQLITE_VERSION],
** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros
︙
New lines 6501-6517 (the three SQLITE_MUTEX_STATIC_VFS* constants are additions):
#define SQLITE_MUTEX_STATIC_PRNG      5  /* sqlite3_random() */
#define SQLITE_MUTEX_STATIC_LRU       6  /* lru page list */
#define SQLITE_MUTEX_STATIC_LRU2      7  /* NOT USED */
#define SQLITE_MUTEX_STATIC_PMEM      7  /* sqlite3PageMalloc() */
#define SQLITE_MUTEX_STATIC_APP1      8  /* For use by application */
#define SQLITE_MUTEX_STATIC_APP2      9  /* For use by application */
#define SQLITE_MUTEX_STATIC_APP3     10  /* For use by application */
#define SQLITE_MUTEX_STATIC_VFS1     11  /* For use by built-in VFS */
#define SQLITE_MUTEX_STATIC_VFS2     12  /* For use by extension VFS */
#define SQLITE_MUTEX_STATIC_VFS3     13  /* For use by application VFS */

/*
** CAPI3REF: Retrieve the mutex for a database connection
** METHOD: sqlite3
**
** ^This interface returns a pointer the [sqlite3_mutex] object that
** serializes access to the [database connection] given in the argument
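The three SQLITE_MUTEX_STATIC_VFS* values are handed out by sqlite3_mutex_alloc() just like the other static mutexes. As a rough sketch only (the helper name and the counter below are invented for illustration, not part of this check-in), an application-supplied VFS could serialize access to shared state on the application-VFS slot:

#include "sqlite3.h"

/* Hedged sketch: guard a VFS-wide counter with the new application-VFS
** static mutex.  Static mutexes are shared and are never freed. */
static int nOpenFiles = 0;            /* hypothetical shared state */

static void myVfsNoteOpen(void){
  sqlite3_mutex *pMutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_VFS3);
  sqlite3_mutex_enter(pMutex);        /* no-op in single-threaded builds */
  nOpenFiles++;
  sqlite3_mutex_leave(pMutex);
}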
︙
New lines 8942-8965 (the SQLITE_DEFAULT_PCACHE_INITSZ block is an addition):
# define SQLITE_DEFAULT_WORKER_THREADS 0
#endif
#if SQLITE_DEFAULT_WORKER_THREADS>SQLITE_MAX_WORKER_THREADS
# undef SQLITE_MAX_WORKER_THREADS
# define SQLITE_MAX_WORKER_THREADS SQLITE_DEFAULT_WORKER_THREADS
#endif

/*
** The default initial allocation for the pagecache when using separate
** pagecaches for each database connection.  A positive number is the
** number of pages.  A negative number N translations means that a buffer
** of -1024*N bytes is allocated and used for as many pages as it will hold.
*/
#ifndef SQLITE_DEFAULT_PCACHE_INITSZ
# define SQLITE_DEFAULT_PCACHE_INITSZ 100
#endif

/*
** GCC does not define the offsetof() macro so we'll have to do it
** ourselves.
*/
#ifndef offsetof
#define offsetof(STRUCTURE,FIELD) ((int)((char*)&((STRUCTURE*)0)->FIELD))
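Because the new default is wrapped in #ifndef, it can be overridden when the amalgamation is compiled. A hedged sketch follows; the -2048 value is only an example, chosen to illustrate the negative-N byte semantics described in the comment above:

/* Sketch of a build-time override: reserve roughly a 2 MiB bulk buffer
** per connection cache instead of 100 pages.  A negative value N means a
** buffer of -1024*N bytes, per the comment above. */
#define SQLITE_DEFAULT_PCACHE_INITSZ (-2048)
#include "sqlite3.c"   /* typical single-translation-unit amalgamation build */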
︙
New lines 14054-14068 (the nPage initializer now uses SQLITE_DEFAULT_PCACHE_INITSZ):
   SQLITE_DEFAULT_MMAP_SIZE,         /* szMmap */
   SQLITE_MAX_MMAP_SIZE,             /* mxMmap */
   (void*)0,                         /* pScratch */
   0,                                /* szScratch */
   0,                                /* nScratch */
   (void*)0,                         /* pPage */
   0,                                /* szPage */
   SQLITE_DEFAULT_PCACHE_INITSZ,     /* nPage */
   0,                                /* mxParserStack */
   0,                                /* sharedCacheEnabled */
   SQLITE_SORTER_PMASZ,              /* szPma */
   /* All the rest should always be initialized to zero */
   0,                                /* isInit */
   0,                                /* inProgress */
   0,                                /* isMutexInit */
︙
New lines 19465-19479 (the aStatic[] bound now derives from SQLITE_MUTEX_STATIC_VFS3):
/*
** The sqlite3_mutex_alloc() routine allocates a new
** mutex and returns a pointer to it.  If it returns NULL
** that means that a mutex could not be allocated.
*/
static sqlite3_mutex *debugMutexAlloc(int id){
  static sqlite3_debug_mutex aStatic[SQLITE_MUTEX_STATIC_VFS3 - 1];
  sqlite3_debug_mutex *pNew = 0;
  switch( id ){
    case SQLITE_MUTEX_FAST:
    case SQLITE_MUTEX_RECURSIVE: {
      pNew = sqlite3Malloc(sizeof(*pNew));
      if( pNew ){
        pNew->id = id;
︙
New lines 19680-19696 (the three VFS entries are added to the list):
**  <li>  SQLITE_MUTEX_STATIC_OPEN
**  <li>  SQLITE_MUTEX_STATIC_PRNG
**  <li>  SQLITE_MUTEX_STATIC_LRU
**  <li>  SQLITE_MUTEX_STATIC_PMEM
**  <li>  SQLITE_MUTEX_STATIC_APP1
**  <li>  SQLITE_MUTEX_STATIC_APP2
**  <li>  SQLITE_MUTEX_STATIC_APP3
**  <li>  SQLITE_MUTEX_STATIC_VFS1
**  <li>  SQLITE_MUTEX_STATIC_VFS2
**  <li>  SQLITE_MUTEX_STATIC_VFS3
** </ul>
**
** The first two constants cause sqlite3_mutex_alloc() to create
** a new mutex.  The new mutex is recursive when SQLITE_MUTEX_RECURSIVE
** is used but not necessarily so when SQLITE_MUTEX_FAST is used.
** The mutex implementation does not need to make a distinction
** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does
︙
New lines 19711-19727 (three more static initializers are added for the new VFS mutexes):
** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc()
** returns a different mutex on every call.  But for the static
** mutex types, the same mutex is returned on every call that has
** the same type number.
*/
static sqlite3_mutex *pthreadMutexAlloc(int iType){
  static sqlite3_mutex staticMutexes[] = {
    SQLITE3_MUTEX_INITIALIZER,
    SQLITE3_MUTEX_INITIALIZER,
    SQLITE3_MUTEX_INITIALIZER,
    SQLITE3_MUTEX_INITIALIZER,
    SQLITE3_MUTEX_INITIALIZER,
    SQLITE3_MUTEX_INITIALIZER,
    SQLITE3_MUTEX_INITIALIZER,
    SQLITE3_MUTEX_INITIALIZER,
    SQLITE3_MUTEX_INITIALIZER,
    SQLITE3_MUTEX_INITIALIZER,
︙
New lines 20328-20344 (three more static initializers are added for the new VFS mutexes):
  SQLITE3_MUTEX_INITIALIZER,
  SQLITE3_MUTEX_INITIALIZER,
  SQLITE3_MUTEX_INITIALIZER,
  SQLITE3_MUTEX_INITIALIZER,
  SQLITE3_MUTEX_INITIALIZER,
  SQLITE3_MUTEX_INITIALIZER,
  SQLITE3_MUTEX_INITIALIZER,
  SQLITE3_MUTEX_INITIALIZER,
  SQLITE3_MUTEX_INITIALIZER,
  SQLITE3_MUTEX_INITIALIZER,
  SQLITE3_MUTEX_INITIALIZER
};
static int winMutex_isInit = 0;
static int winMutex_isNt = -1; /* <0 means "need to query" */

/* As the winMutexInit() and winMutexEnd() functions are called as part
︙
New lines 20402-20418 (the three VFS entries are added to the list):
**  <li>  SQLITE_MUTEX_STATIC_OPEN
**  <li>  SQLITE_MUTEX_STATIC_PRNG
**  <li>  SQLITE_MUTEX_STATIC_LRU
**  <li>  SQLITE_MUTEX_STATIC_PMEM
**  <li>  SQLITE_MUTEX_STATIC_APP1
**  <li>  SQLITE_MUTEX_STATIC_APP2
**  <li>  SQLITE_MUTEX_STATIC_APP3
**  <li>  SQLITE_MUTEX_STATIC_VFS1
**  <li>  SQLITE_MUTEX_STATIC_VFS2
**  <li>  SQLITE_MUTEX_STATIC_VFS3
** </ul>
**
** The first two constants cause sqlite3_mutex_alloc() to create
** a new mutex.  The new mutex is recursive when SQLITE_MUTEX_RECURSIVE
** is used but not necessarily so when SQLITE_MUTEX_FAST is used.
** The mutex implementation does not need to make a distinction
** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does
︙
New lines 20816-20832 (the nPage test in the if() condition is the modified line):
  }else{
    mem0.pScratchEnd = 0;
    sqlite3GlobalConfig.pScratch = 0;
    sqlite3GlobalConfig.szScratch = 0;
    sqlite3GlobalConfig.nScratch = 0;
  }
  if( sqlite3GlobalConfig.pPage==0 || sqlite3GlobalConfig.szPage<512
   || sqlite3GlobalConfig.nPage<=0 ){
    sqlite3GlobalConfig.pPage = 0;
    sqlite3GlobalConfig.szPage = 0;
  }
  rc = sqlite3GlobalConfig.m.xInit(sqlite3GlobalConfig.m.pAppData);
  if( rc!=SQLITE_OK ) memset(&mem0, 0, sizeof(mem0));
  return rc;
}

/*
︙
New lines 26544-26565 (the unix VFS mutex helpers now use SQLITE_MUTEX_STATIC_VFS1):
** statements.  e.g.
**
**   unixEnterMutex()
**     assert( unixMutexHeld() );
**   unixEnterLeave()
*/
static void unixEnterMutex(void){
  sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1));
}
static void unixLeaveMutex(void){
  sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1));
}
#ifdef SQLITE_DEBUG
static int unixMutexHeld(void) {
  return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1));
}
#endif

#ifdef SQLITE_HAVE_OS_TRACE
/*
** Helper function for printing out trace information from debugging
︙
New lines 37069-37090 (the Windows shared-memory mutex helpers now use SQLITE_MUTEX_STATIC_VFS1):
** statements. e.g.
**
**   winShmEnterMutex()
**     assert( winShmMutexHeld() );
**   winShmLeaveMutex()
*/
static void winShmEnterMutex(void){
  sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1));
}
static void winShmLeaveMutex(void){
  sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1));
}
#ifndef NDEBUG
static int winShmMutexHeld(void) {
  return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1));
}
#endif

/*
** Object used to represent a single file opened and mmapped to provide
** shared memory.  When multiple threads all reference the same
** log-summary, each thread has its own winFile object, but they all
︙
New lines 40421-40499 (most of this pcache1.c header comment is newly added):
*************************************************************************
**
** This file implements the default page cache implementation (the
** sqlite3_pcache interface). It also contains part of the implementation
** of the SQLITE_CONFIG_PAGECACHE and sqlite3_release_memory() features.
** If the default page cache implementation is overridden, then neither of
** these two features are available.
**
** A Page cache line looks like this:
**
**  -------------------------------------------------------------
**  |  database page content   |  PgHdr1  |  MemPage  |  PgHdr  |
**  -------------------------------------------------------------
**
** The database page content is up front (so that buffer overreads tend to
** flow harmlessly into the PgHdr1, MemPage, and PgHdr extensions).  MemPage
** is the extension added by the btree.c module containing information such
** as the database page number and how that database page is used.  PgHdr
** is added by the pcache.c layer and contains information used to keep track
** of which pages are "dirty".  PgHdr1 is an extension added by this
** module (pcache1.c).  The PgHdr1 header is a subclass of sqlite3_pcache_page.
** PgHdr1 contains information needed to look up a page by its page number.
** The superclass sqlite3_pcache_page.pBuf points to the start of the
** database page content and sqlite3_pcache_page.pExtra points to PgHdr.
**
** The size of the extension (MemPage+PgHdr+PgHdr1) can be determined at
** runtime using sqlite3_config(SQLITE_CONFIG_PCACHE_HDRSZ, &size).  The
** sizes of the extensions sum to 272 bytes on x64 for 3.8.10, but this
** size can vary according to architecture, compile-time options, and
** SQLite library version number.
**
** If SQLITE_PCACHE_SEPARATE_HEADER is defined, then the extension is obtained
** using a separate memory allocation from the database page content.  This
** seeks to overcome the "clownshoe" problem (also called "internal
** fragmentation" in academic literature) of allocating a few bytes more
** than a power of two with the memory allocator rounding up to the next
** power of two, and leaving the rounded-up space unused.
**
** This module tracks pointers to PgHdr1 objects.  Only pcache.c communicates
** with this module.  Information is passed back and forth as PgHdr1 pointers.
**
** The pcache.c and pager.c modules deal pointers to PgHdr objects.
** The btree.c module deals with pointers to MemPage objects.
**
** SOURCE OF PAGE CACHE MEMORY:
**
** Memory for a page might come from any of three sources:
**
**    (1)  The general-purpose memory allocator - sqlite3Malloc()
**    (2)  Global page-cache memory provided using sqlite3_config() with
**         SQLITE_CONFIG_PAGECACHE.
**    (3)  PCache-local bulk allocation.
**
** The third case is a chunk of heap memory (defaulting to 100 pages worth)
** that is allocated when the page cache is created.  The size of the local
** bulk allocation can be adjusted using
**
**     sqlite3_config(SQLITE_CONFIG_PCACHE, 0, 0, N).
**
** If N is positive, then N pages worth of memory are allocated using a single
** sqlite3Malloc() call and that memory is used for the first N pages allocated.
** Or if N is negative, then -1024*N bytes of memory are allocated and used
** for as many pages as can be accomodated.
**
** Only one of (2) or (3) can be used.  Once the memory available to (2) or
** (3) is exhausted, subsequent allocations fail over to the general-purpose
** memory allocator (1).
**
** Earlier versions of SQLite used only methods (1) and (2).  But experiments
** show that method (3) with N==100 provides about a 5% performance boost for
** common workloads.
*/
typedef struct PCache1 PCache1;
typedef struct PgHdr1 PgHdr1;
typedef struct PgFreeslot PgFreeslot;
typedef struct PGroup PGroup;

/* Each page cache (or PCache) belongs to a PGroup.  A PGroup is a set
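For contrast with the new bulk allocation (source 3), source (2) is the start-time buffer an application hands to SQLite before initialization. A minimal, hedged sketch of that call follows; the slot size and count are arbitrary, and a real application would size slots using the value reported by SQLITE_CONFIG_PCACHE_HDRSZ:

#include "sqlite3.h"

/* Hedged sketch of source (2): a global page-cache buffer supplied at
** start time.  Each slot must hold a database page plus header overhead;
** 4096+80 is only a guess used for illustration. */
#define SLOT_SZ  (4096 + 80)
#define N_SLOTS  200
static char aPageCacheBuf[SLOT_SZ * N_SLOTS];

static int setupPageCache(void){
  /* Must run before sqlite3_initialize() or the first connection. */
  return sqlite3_config(SQLITE_CONFIG_PAGECACHE, aPageCacheBuf, SLOT_SZ, N_SLOTS);
}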
︙
New lines 40538-40613 (new members include PCache1.szAlloc, PCache1.pFree, PCache1.pBulk, PgHdr1.isBulkLocal, and PCacheGlobal.separateCache):
struct PCache1 {
  /* Cache configuration parameters. Page size (szPage) and the purgeable
  ** flag (bPurgeable) are set when the cache is created. nMax may be
  ** modified at any time by a call to the pcache1Cachesize() method.
  ** The PGroup mutex must be held when accessing nMax.
  */
  PGroup *pGroup;                     /* PGroup this cache belongs to */
  int szPage;                         /* Size of database content section */
  int szExtra;                        /* sizeof(MemPage)+sizeof(PgHdr) */
  int szAlloc;                        /* Total size of one pcache line */
  int bPurgeable;                     /* True if cache is purgeable */
  unsigned int nMin;                  /* Minimum number of pages reserved */
  unsigned int nMax;                  /* Configured "cache_size" value */
  unsigned int n90pct;                /* nMax*9/10 */
  unsigned int iMaxKey;               /* Largest key seen since xTruncate() */

  /* Hash table of all pages. The following variables may only be accessed
  ** when the accessor is holding the PGroup mutex.
  */
  unsigned int nRecyclable;           /* Number of pages in the LRU list */
  unsigned int nPage;                 /* Total number of pages in apHash */
  unsigned int nHash;                 /* Number of slots in apHash[] */
  PgHdr1 **apHash;                    /* Hash table for fast lookup by key */
  PgHdr1 *pFree;                      /* List of unused pcache-local pages */
  void *pBulk;                        /* Bulk memory used by pcache-local */
};

/*
** Each cache entry is represented by an instance of the following
** structure. Unless SQLITE_PCACHE_SEPARATE_HEADER is defined, a buffer of
** PgHdr1.pCache->szPage bytes is allocated directly before this structure
** in memory.
*/
struct PgHdr1 {
  sqlite3_pcache_page page;
  unsigned int iKey;             /* Key value (page number) */
  u8 isPinned;                   /* Page in use, not on the LRU list */
  u8 isBulkLocal;                /* This page from bulk local storage */
  PgHdr1 *pNext;                 /* Next in hash table chain */
  PCache1 *pCache;               /* Cache that currently owns this page */
  PgHdr1 *pLruNext;              /* Next in LRU list of unpinned pages */
  PgHdr1 *pLruPrev;              /* Previous in LRU list of unpinned pages */
};

/*
** Free slots in the allocator used to divide up the global page cache
** buffer provided using the SQLITE_CONFIG_PAGECACHE mechanism.
*/
struct PgFreeslot {
  PgFreeslot *pNext;  /* Next free slot */
};

/*
** Global data used by this cache.
*/
static SQLITE_WSD struct PCacheGlobal {
  PGroup grp;                    /* The global PGroup for mode (2) */

  /* Variables related to SQLITE_CONFIG_PAGECACHE settings.  The
  ** szSlot, nSlot, pStart, pEnd, nReserve, and isInit values are all
  ** fixed at sqlite3_initialize() time and do not require mutex protection.
  ** The nFreeSlot and pFree values do require mutex protection.
  */
  int isInit;                    /* True if initialized */
  int separateCache;             /* Use a new PGroup for each PCache */
  int szSlot;                    /* Size of each free slot */
  int nSlot;                     /* The number of pcache slots */
  int nReserve;                  /* Try to keep nFreeSlot above this */
  void *pStart, *pEnd;           /* Bounds of global page cache memory */
  /* Above requires no mutex.  Use mutex below for variable that follow. */
  sqlite3_mutex *mutex;          /* Mutex for accessing the following: */
  PgFreeslot *pFree;             /* Free page blocks */
  int nFreeSlot;                 /* Number of unused pcache slots */
  /* The following value requires a mutex to change.  We skip the mutex on
  ** reading because (1) most platforms read a 32-bit integer atomically and
  ** (2) even if an incorrect value is read, no great harm is done since this
︙
New lines 40646-40660 (the pBuf==0 guard is an addition):
**
** This routine is called from sqlite3_initialize() and so it is guaranteed
** to be serialized already.  There is no need for further mutexing.
*/
SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *pBuf, int sz, int n){
  if( pcache1.isInit ){
    PgFreeslot *p;
    if( pBuf==0 ) sz = n = 0;
    sz = ROUNDDOWN8(sz);
    pcache1.szSlot = sz;
    pcache1.nSlot = pcache1.nFreeSlot = n;
    pcache1.nReserve = n>90 ? 10 : (n/10 + 1);
    pcache1.pStart = pBuf;
    pcache1.pFree = 0;
    pcache1.bUnderPressure = 0;
︙
New lines 40711-40749 (pcache1Free() is reworked):
  }
  return p;
}

/*
** Free an allocated buffer obtained from pcache1Alloc().
*/
static void pcache1Free(void *p){
  int nFreed = 0;
  if( p==0 ) return;
  if( p>=pcache1.pStart && p<pcache1.pEnd ){
    PgFreeslot *pSlot;
    sqlite3_mutex_enter(pcache1.mutex);
    sqlite3StatusDown(SQLITE_STATUS_PAGECACHE_USED, 1);
    pSlot = (PgFreeslot*)p;
    pSlot->pNext = pcache1.pFree;
    pcache1.pFree = pSlot;
    pcache1.nFreeSlot++;
    pcache1.bUnderPressure = pcache1.nFreeSlot<pcache1.nReserve;
    assert( pcache1.nFreeSlot<=pcache1.nSlot );
    sqlite3_mutex_leave(pcache1.mutex);
  }else{
    assert( sqlite3MemdebugHasType(p, MEMTYPE_PCACHE) );
    sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
#ifndef SQLITE_DISABLE_PAGECACHE_OVERFLOW_STATS
    nFreed = sqlite3MallocSize(p);
    sqlite3_mutex_enter(pcache1.mutex);
    sqlite3StatusDown(SQLITE_STATUS_PAGECACHE_OVERFLOW, nFreed);
    sqlite3_mutex_leave(pcache1.mutex);
#endif
    sqlite3_free(p);
  }
}

#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
/*
** Return the size of a pcache allocation
*/
static int pcache1MemSize(void *p){
︙
New lines 40763-40835 (pcache1AllocPage() and pcache1FreePage() gain support for the pcache-local bulk free list):
/*
** Allocate a new page object initially associated with cache pCache.
*/
static PgHdr1 *pcache1AllocPage(PCache1 *pCache){
  PgHdr1 *p = 0;
  void *pPg;

  assert( sqlite3_mutex_held(pCache->pGroup->mutex) );
  if( pCache->pFree ){
    p = pCache->pFree;
    pCache->pFree = p->pNext;
    p->pNext = 0;
  }else{
#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
    /* The group mutex must be released before pcache1Alloc() is called. This
    ** is because it might call sqlite3_release_memory(), which assumes that
    ** this mutex is not held. */
    assert( pcache1.separateCache==0 );
    assert( pCache->pGroup==&pcache1.grp );
    pcache1LeaveMutex(pCache->pGroup);
#endif
#ifdef SQLITE_PCACHE_SEPARATE_HEADER
    pPg = pcache1Alloc(pCache->szPage);
    p = sqlite3Malloc(sizeof(PgHdr1) + pCache->szExtra);
    if( !pPg || !p ){
      pcache1Free(pPg);
      sqlite3_free(p);
      pPg = 0;
    }
#else
    pPg = pcache1Alloc(pCache->szAlloc);
    p = (PgHdr1 *)&((u8 *)pPg)[pCache->szPage];
#endif
#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
    pcache1EnterMutex(pCache->pGroup);
#endif
    if( pPg==0 ) return 0;
    p->page.pBuf = pPg;
    p->page.pExtra = &p[1];
    p->isBulkLocal = 0;
  }
  if( pCache->bPurgeable ){
    pCache->pGroup->nCurrentPage++;
  }
  return p;
}

/*
** Free a page object allocated by pcache1AllocPage().
*/
static void pcache1FreePage(PgHdr1 *p){
  PCache1 *pCache;
  assert( p!=0 );
  pCache = p->pCache;
  assert( sqlite3_mutex_held(p->pCache->pGroup->mutex) );
  if( p->isBulkLocal ){
    p->pNext = pCache->pFree;
    pCache->pFree = p;
  }else{
    pcache1Free(p->page.pBuf);
#ifdef SQLITE_PCACHE_SEPARATE_HEADER
    sqlite3_free(p);
#endif
  }
  if( pCache->bPurgeable ){
    pCache->pGroup->nCurrentPage--;
  }
}

/*
** Malloc function used by SQLite to obtain space from the buffer configured
** using sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no such buffer
** exists, this function falls back to sqlite3Malloc().
︙
New lines 41021-41059 (the separateCache selection logic is an addition):
/*
** Implementation of the sqlite3_pcache.xInit method.
*/
static int pcache1Init(void *NotUsed){
  UNUSED_PARAMETER(NotUsed);
  assert( pcache1.isInit==0 );
  memset(&pcache1, 0, sizeof(pcache1));

  /*
  ** The pcache1.separateCache variable is true if each PCache has its own
  ** private PGroup (mode-1).  pcache1.separateCache is false if the single
  ** PGroup in pcache1.grp is used for all page caches (mode-2).
  **
  **   *  Always use a unified cache (mode-2) if ENABLE_MEMORY_MANAGEMENT
  **
  **   *  Use a unified cache in single-threaded applications that have
  **      configured a start-time buffer for use as page-cache memory using
  **      sqlite3_config(SQLITE_CONFIG_PAGECACHE, pBuf, sz, N) with non-NULL
  **      pBuf argument.
  **
  **   *  Otherwise use separate caches (mode-1)
  */
#if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT)
  pcache1.separateCache = 0;
#elif SQLITE_THREADSAFE
  pcache1.separateCache = sqlite3GlobalConfig.pPage==0
                          || sqlite3GlobalConfig.bCoreMutex>0;
#else
  pcache1.separateCache = sqlite3GlobalConfig.pPage==0;
#endif

#if SQLITE_THREADSAFE
  if( sqlite3GlobalConfig.bCoreMutex ){
    pcache1.grp.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_LRU);
    pcache1.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_PMEM);
  }
#endif
  pcache1.grp.mxPinned = 10;
︙
New lines 41081-41149 (pcache1Create() now sets szAlloc and optionally preallocates a local bulk buffer):
** Allocate a new cache.
*/
static sqlite3_pcache *pcache1Create(int szPage, int szExtra, int bPurgeable){
  PCache1 *pCache;      /* The newly created page cache */
  PGroup *pGroup;       /* The group the new page cache will belong to */
  int sz;               /* Bytes of memory required to allocate the new cache */

  assert( (szPage & (szPage-1))==0 && szPage>=512 && szPage<=65536 );
  assert( szExtra < 300 );

  sz = sizeof(PCache1) + sizeof(PGroup)*pcache1.separateCache;
  pCache = (PCache1 *)sqlite3MallocZero(sz);
  if( pCache ){
    if( pcache1.separateCache ){
      pGroup = (PGroup*)&pCache[1];
      pGroup->mxPinned = 10;
    }else{
      pGroup = &pcache1.grp;
    }
    pCache->pGroup = pGroup;
    pCache->szPage = szPage;
    pCache->szExtra = szExtra;
    pCache->szAlloc = szPage + szExtra + ROUND8(sizeof(PgHdr1));
    pCache->bPurgeable = (bPurgeable ? 1 : 0);
    pcache1EnterMutex(pGroup);
    pcache1ResizeHash(pCache);
    if( bPurgeable ){
      pCache->nMin = 10;
      pGroup->nMinPage += pCache->nMin;
      pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage;
    }
    pcache1LeaveMutex(pGroup);
    /* Try to initialize the local bulk pagecache line allocation if using
    ** separate caches and if nPage!=0 */
    if( pcache1.separateCache
     && sqlite3GlobalConfig.nPage!=0
     && sqlite3GlobalConfig.pPage==0
    ){
      int szBulk;
      char *zBulk;
      sqlite3BeginBenignMalloc();
      if( sqlite3GlobalConfig.nPage>0 ){
        szBulk = pCache->szAlloc * sqlite3GlobalConfig.nPage;
      }else{
        szBulk = -1024*sqlite3GlobalConfig.nPage;
      }
      zBulk = pCache->pBulk = sqlite3Malloc( szBulk );
      sqlite3EndBenignMalloc();
      if( zBulk ){
        int nBulk = sqlite3MallocSize(zBulk)/pCache->szAlloc;
        int i;
        for(i=0; i<nBulk; i++){
          PgHdr1 *pX = (PgHdr1*)&zBulk[szPage];
          pX->page.pBuf = zBulk;
          pX->page.pExtra = &pX[1];
          pX->isBulkLocal = 1;
          pX->pNext = pCache->pFree;
          pCache->pFree = pX;
          zBulk += pCache->szAlloc;
        }
      }
    }
    if( pCache->nHash==0 ){
      pcache1Destroy((sqlite3_pcache*)pCache);
      pCache = 0;
    }
  }
  return (sqlite3_pcache *)pCache;
}
︙
New lines 41229-41253 (the recycling test now compares szAlloc):
    return 0;
  }

  if( pCache->nPage>=pCache->nHash ) pcache1ResizeHash(pCache);
  assert( pCache->nHash>0 && pCache->apHash );

  /* Step 4. Try to recycle a page. */
  if( pCache->bPurgeable && pGroup->pLruTail
   && ((pCache->nPage+1>=pCache->nMax) || pcache1UnderMemoryPressure(pCache))
  ){
    PCache1 *pOther;
    pPage = pGroup->pLruTail;
    assert( pPage->isPinned==0 );
    pcache1RemoveFromHash(pPage, 0);
    pcache1PinPage(pPage);
    pOther = pPage->pCache;
    if( pOther->szAlloc != pCache->szAlloc ){
      pcache1FreePage(pPage);
      pPage = 0;
    }else{
      pGroup->nCurrentPage -= (pOther->bPurgeable - pCache->bPurgeable);
    }
  }
︙
New lines 41515-41529 (the sqlite3_free(pCache->pBulk) call is an addition):
  assert( pGroup->nMaxPage >= pCache->nMax );
  pGroup->nMaxPage -= pCache->nMax;
  assert( pGroup->nMinPage >= pCache->nMin );
  pGroup->nMinPage -= pCache->nMin;
  pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage;
  pcache1EnforceMaxPage(pGroup);
  pcache1LeaveMutex(pGroup);
  sqlite3_free(pCache->pBulk);
  sqlite3_free(pCache->apHash);
  sqlite3_free(pCache);
}

/*
** This function is called during initialization (sqlite3_initialize()) to
** install the default pluggable cache module, assuming the user has not
︙
New lines 41571-41585 (the if() guard is the modified line):
** been released, the function returns. The return value is the total number
** of bytes of memory released.
*/
SQLITE_PRIVATE int sqlite3PcacheReleaseMemory(int nReq){
  int nFree = 0;
  assert( sqlite3_mutex_notheld(pcache1.grp.mutex) );
  assert( sqlite3_mutex_notheld(pcache1.mutex) );
  if( sqlite3GlobalConfig.nPage==0 ){
    PgHdr1 *p;
    pcache1EnterMutex(&pcache1.grp);
    while( (nReq<0 || nFree<nReq) && ((p=pcache1.grp.pLruTail)!=0) ){
      nFree += pcache1MemSize(p->page.pBuf);
#ifdef SQLITE_PCACHE_SEPARATE_HEADER
      nFree += sqlite3MemSize(p);
#endif
︙
New lines 62673-62705 (the savedIsInit bookkeeping is an addition):
  u32 usableSize;     /* Usable size of the page */
  u32 contentOffset;  /* Offset to the start of the cell content area */
  u32 *heap = 0;      /* Min-heap used for checking cell coverage */
  u32 x, prev = 0;    /* Next and previous entry on the min-heap */
  const char *saved_zPfx = pCheck->zPfx;
  int saved_v1 = pCheck->v1;
  int saved_v2 = pCheck->v2;
  u8 savedIsInit;

  /* Check that the page exists
  */
  pBt = pCheck->pBt;
  usableSize = pBt->usableSize;
  if( iPage==0 ) return 0;
  if( checkRef(pCheck, iPage) ) return 0;
  pCheck->zPfx = "Page %d: ";
  pCheck->v1 = iPage;
  if( (rc = btreeGetPage(pBt, (Pgno)iPage, &pPage, 0))!=0 ){
    checkAppendMsg(pCheck,
       "unable to get the page. error code=%d", rc);
    goto end_of_check;
  }

  /* Clear MemPage.isInit to make sure the corruption detection code in
  ** btreeInitPage() is executed.  */
  savedIsInit = pPage->isInit;
  pPage->isInit = 0;
  if( (rc = btreeInitPage(pPage))!=0 ){
    assert( rc==SQLITE_CORRUPT );  /* The only possible error from InitPage */
    checkAppendMsg(pCheck,
                   "btreeInitPage() returns error code %d", rc);
    goto end_of_check;
  }
︙
New lines 62833-62847 (the btreeHeapInsert() call is the modified line):
  */
  i = get2byte(&data[hdr+1]);
  while( i>0 ){
    int size, j;
    assert( (u32)i<=usableSize-4 );     /* Enforced by btreeInitPage() */
    size = get2byte(&data[i+2]);
    assert( (u32)(i+size)<=usableSize );  /* Enforced by btreeInitPage() */
    btreeHeapInsert(heap, (((u32)i)<<16)|(i+size-1));
    /* EVIDENCE-OF: R-58208-19414 The first 2 bytes of a freeblock are a
    ** big-endian integer which is the offset in the b-tree page of the next
    ** freeblock in the chain, or zero if the freeblock is the last on the
    ** chain. */
    j = get2byte(&data[i]);
    /* EVIDENCE-OF: R-06866-39125 Freeblocks are always connected in order of
    ** increasing offset. */
︙
New lines 62884-62898 (the isInit restore at end_of_check is an addition):
      checkAppendMsg(pCheck,
          "Fragmentation of %d bytes reported as %d on page %d",
          nFrag, data[hdr+7], iPage);
    }
  }

end_of_check:
  if( !doCoverageCheck ) pPage->isInit = savedIsInit;
  releasePage(pPage);
  pCheck->zPfx = saved_zPfx;
  pCheck->v1 = saved_v1;
  pCheck->v2 = saved_v2;
  return depth+1;
}
#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
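The savedIsInit handling above only matters while the integrity checker walks the tree. A hedged sketch of driving that code path from C (error handling trimmed; nothing here is taken from the check-in itself):

#include <stdio.h>
#include "sqlite3.h"

/* Sketch: run PRAGMA integrity_check, which exercises checkTreePage(). */
static void checkDb(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  if( sqlite3_prepare_v2(db, "PRAGMA integrity_check;", -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
    }
  }
  sqlite3_finalize(pStmt);   /* finalize(NULL) is a harmless no-op */
}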
︙
New lines 69154-69168 (the szHdr1 corruption guard is an addition):
  ** the unnecessary initialization has a measurable negative performance
  ** impact, since this routine is a very high runner.  And so, we choose
  ** to ignore the compiler warnings and leave this variable uninitialized.
  */
  /*  mem1.u.i = 0;  // not needed, here to silence compiler warning */

  idx1 = getVarint32(aKey1, szHdr1);
  if( szHdr1>98307 ) return SQLITE_CORRUPT;
  d1 = szHdr1;
  assert( pKeyInfo->nField+pKeyInfo->nXField>=pPKey2->nField || CORRUPT_DB );
  assert( pKeyInfo->aSortOrder!=0 );
  assert( pKeyInfo->nField>0 );
  assert( idx1<=szHdr1 || CORRUPT_DB );
  do{
    u32 serial_type1;
︙
New lines 109514-109535 (the SF_Aggregate error branch is an addition):
    VdbeCoverage(v);
  }
  sqlite3VdbeResolveLabel(v, addrCont);

  /* Execute the recursive SELECT taking the single row in Current as
  ** the value for the recursive-table. Store the results in the Queue.
  */
  if( p->selFlags & SF_Aggregate ){
    sqlite3ErrorMsg(pParse, "recursive aggregate queries not supported");
  }else{
    p->pPrior = 0;
    sqlite3Select(pParse, p, &destQueue);
    assert( p->pPrior==0 );
    p->pPrior = pSetup;
  }

  /* Keep running the loop until the Queue is empty */
  sqlite3VdbeAddOp2(v, OP_Goto, 0, addrTop);
  sqlite3VdbeResolveLabel(v, addrBreak);

end_of_recursive_query:
  sqlite3ExprListDelete(pParse->db, p->pOrderBy);
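The new branch turns an unsupported construct into a clean error instead of letting it run. A hedged sketch of a statement that should now be rejected with that message; the SQL text is illustrative and not taken from the check-in:

#include <stdio.h>
#include "sqlite3.h"

/* Sketch: an aggregate in the recursive SELECT of a CTE should now fail
** with "recursive aggregate queries not supported". */
static void demoRecursiveAggregate(sqlite3 *db){
  char *zErr = 0;
  int rc = sqlite3_exec(db,
      "WITH RECURSIVE c(x) AS ("
      "  SELECT 1 UNION ALL SELECT sum(x) FROM c"
      ") SELECT x FROM c;",
      0, 0, &zErr);
  if( rc!=SQLITE_OK ){
    printf("error: %s\n", zErr);   /* expected on builds with this change */
    sqlite3_free(zErr);
  }
}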
︙
Changes to src/sqlite3.h.
︙
New lines 109-123 (SQLITE_SOURCE_ID is the modified line):
**
** See also: [sqlite3_libversion()],
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
#define SQLITE_VERSION        "3.8.11"
#define SQLITE_VERSION_NUMBER 3008011
#define SQLITE_SOURCE_ID      "2015-07-08 16:22:42 5348ffc3fda5168c1e9e14aa88b0c6aedbda7c94"

/*
** CAPI3REF: Run-Time Library Version Numbers
** KEYWORDS: sqlite3_version, sqlite3_sourceid
**
** These interfaces provide the same information as the [SQLITE_VERSION],
** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros
︙
New lines 6287-6303 (the three SQLITE_MUTEX_STATIC_VFS* constants are additions):
#define SQLITE_MUTEX_STATIC_PRNG      5  /* sqlite3_random() */
#define SQLITE_MUTEX_STATIC_LRU       6  /* lru page list */
#define SQLITE_MUTEX_STATIC_LRU2      7  /* NOT USED */
#define SQLITE_MUTEX_STATIC_PMEM      7  /* sqlite3PageMalloc() */
#define SQLITE_MUTEX_STATIC_APP1      8  /* For use by application */
#define SQLITE_MUTEX_STATIC_APP2      9  /* For use by application */
#define SQLITE_MUTEX_STATIC_APP3     10  /* For use by application */
#define SQLITE_MUTEX_STATIC_VFS1     11  /* For use by built-in VFS */
#define SQLITE_MUTEX_STATIC_VFS2     12  /* For use by extension VFS */
#define SQLITE_MUTEX_STATIC_VFS3     13  /* For use by application VFS */

/*
** CAPI3REF: Retrieve the mutex for a database connection
** METHOD: sqlite3
**
** ^This interface returns a pointer the [sqlite3_mutex] object that
** serializes access to the [database connection] given in the argument
︙
Changes to win/Makefile.msc.
︙
New lines 37-51 (the FOSSIL_BUILD_ZLIB default is the modified line):
# Build the OpenSSL libraries?
!ifndef FOSSIL_BUILD_SSL
FOSSIL_BUILD_SSL = 0
!endif

# Build the included zlib library?
!ifndef FOSSIL_BUILD_ZLIB
FOSSIL_BUILD_ZLIB = 1
!endif

# Link everything except SQLite dynamically?
!ifndef FOSSIL_DYNAMIC_BUILD
FOSSIL_DYNAMIC_BUILD = 0
!endif
︙