// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Package opt provides sets of options used by LevelDB.
package opt

import (
	"math"

	"github.com/syndtr/goleveldb/leveldb/cache"
	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/filter"
)
const (
	KiB = 1024
	MiB = KiB * 1024
	GiB = MiB * 1024
)

var (
	DefaultBlockCacher                   = LRUCacher
	DefaultBlockCacheCapacity            = 8 * MiB
	DefaultBlockRestartInterval          = 16
	DefaultBlockSize                     = 4 * KiB
	DefaultCompactionExpandLimitFactor   = 25
	DefaultCompactionGPOverlapsFactor    = 10
	DefaultCompactionL0Trigger           = 4
	DefaultCompactionSourceLimitFactor   = 1
	DefaultCompactionTableSize           = 2 * MiB
	DefaultCompactionTableSizeMultiplier = 1.0
	DefaultCompactionTotalSize           = 10 * MiB
	DefaultCompactionTotalSizeMultiplier = 10.0
	DefaultCompressionType               = SnappyCompression
	DefaultIteratorSamplingRate          = 1 * MiB
	DefaultOpenFilesCacher               = LRUCacher
	DefaultOpenFilesCacheCapacity        = 500
	DefaultWriteBuffer                   = 4 * MiB
	DefaultWriteL0PauseTrigger           = 12
	DefaultWriteL0SlowdownTrigger        = 8
)
// Cacher is a caching algorithm.
type Cacher interface {
	New(capacity int) cache.Cacher
}

// CacherFunc wraps a constructor function so that it satisfies the
// Cacher interface.
type CacherFunc struct {
	NewFunc func(capacity int) cache.Cacher
}

// New calls the wrapped constructor function, if any; otherwise it
// returns nil, which disables caching.
func (f *CacherFunc) New(capacity int) cache.Cacher {
	if f.NewFunc != nil {
		return f.NewFunc(capacity)
	}
	return nil
}

func noCacher(int) cache.Cacher { return nil }

var (
	// LRUCacher is the LRU-cache algorithm.
	LRUCacher = &CacherFunc{cache.NewLRU}

	// NoCacher disables the caching algorithm.
	NoCacher = &CacherFunc{}
)
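// A minimal usage sketch: CacherFunc adapts any constructor with the right
// signature into a Cacher. The 1 MiB lower bound below is a hypothetical
// policy, not something goleveldb imposes; cache.NewLRU is the same
// constructor that LRUCacher wraps.
//
//	var clampedLRU = &CacherFunc{NewFunc: func(capacity int) cache.Cacher {
//		if capacity < 1*MiB {
//			capacity = 1 * MiB // hypothetical minimum
//		}
//		return cache.NewLRU(capacity)
//	}}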
// Compression is the 'sorted table' block compression algorithm to use.
type Compression uint

func (c Compression) String() string {
	switch c {
	case DefaultCompression:
		return "default"
	case NoCompression:
		return "none"
	case SnappyCompression:
		return "snappy"
	}
	return "invalid"
}

const (
	DefaultCompression Compression = iota
	NoCompression
	SnappyCompression
	nCompression
)
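// A quick sketch of how these values resolve: the zero value
// (DefaultCompression) maps to snappy via GetCompression below, so setting
// Compression explicitly is only needed to opt out.
//
//	o := &Options{Compression: NoCompression}
//	_ = o.GetCompression().String() // "none"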
// Strict is the DB 'strict level'.
type Strict uint

const (
	// If present then a corrupted or invalid chunk or block in the manifest
	// journal will cause an error instead of being dropped.
	// This prevents a database with a corrupted manifest from being opened.
	StrictManifest Strict = 1 << iota

	// If present then journal chunk checksums will be verified.
	StrictJournalChecksum

	// If present then a corrupted or invalid chunk or block in the journal
	// will cause an error instead of being dropped.
	// This prevents a database with a corrupted journal from being opened.
	StrictJournal

	// If present then 'sorted table' block checksums will be verified.
	// This affects both 'read operations' and compaction.
	StrictBlockChecksum

	// If present then a corrupted 'sorted table' will fail compaction.
	// The database will enter read-only mode.
	StrictCompaction

	// If present then a corrupted 'sorted table' will halt the 'read operation'.
	StrictReader

	// If present then leveldb.Recover will drop corrupted 'sorted tables'.
	StrictRecovery

	// This is only applicable to ReadOptions; if present then this
	// ReadOptions 'strict level' will override the global one.
	StrictOverride

	// StrictAll enables all strict flags.
	StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery

	// DefaultStrict is the default set of strict flags. Specifying any
	// strict flags overrides the default strict flags as a whole
	// (i.e. they are not OR'ed).
	DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader

	// NoStrict disables all strict flags. It overrides the default strict flags.
	NoStrict = ^StrictAll
)
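// A short sketch of the override-as-a-whole semantics: a non-zero Strict
// replaces DefaultStrict entirely, so keeping the defaults while adding one
// flag means OR-ing them explicitly.
//
//	o := &Options{Strict: DefaultStrict | StrictManifest}
//	_ = o.GetStrict(StrictManifest) // true
//	_ = o.GetStrict(StrictRecovery) // false (not in DefaultStrict either)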
// Options holds the optional parameters for the DB at large.
type Options struct {
	// AltFilters defines one or more 'alternative filters'.
	// 'alternative filters' will be used during reads if a filter block
	// does not match the 'effective filter'.
	//
	// The default value is nil.
	AltFilters []filter.Filter

	// BlockCacher provides the cache algorithm for LevelDB 'sorted table'
	// block caching. Specify NoCacher to disable caching.
	//
	// The default value is LRUCacher.
	BlockCacher Cacher

	// BlockCacheCapacity defines the capacity of the 'sorted table' block cache.
	// Use -1 for zero; this has the same effect as specifying NoCacher
	// for BlockCacher.
	//
	// The default value is 8MiB.
	BlockCacheCapacity int

	// BlockCacheEvictRemoved enables forced eviction of cached blocks that
	// belong to removed 'sorted tables'.
	//
	// The default value is false.
	BlockCacheEvictRemoved bool

	// BlockRestartInterval is the number of keys between restart points for
	// delta encoding of keys.
	//
	// The default value is 16.
	BlockRestartInterval int

	// BlockSize is the minimum uncompressed size in bytes of each 'sorted table'
	// block.
	//
	// The default value is 4KiB.
	BlockSize int

	// CompactionExpandLimitFactor limits the compaction size after it has
	// been expanded. It is multiplied by the table size limit at the
	// compaction target level.
	//
	// The default value is 25.
	CompactionExpandLimitFactor int

	// CompactionGPOverlapsFactor limits the overlaps in the grandparent
	// (Level + 2) that a single 'sorted table' generates. It is multiplied
	// by the table size limit at the grandparent level.
	//
	// The default value is 10.
	CompactionGPOverlapsFactor int

	// CompactionL0Trigger defines the number of 'sorted tables' at level-0
	// that will trigger compaction.
	//
	// The default value is 4.
	CompactionL0Trigger int

	// CompactionSourceLimitFactor limits the compaction source size. This
	// doesn't apply to level-0. It is multiplied by the table size limit at
	// the compaction target level.
	//
	// The default value is 1.
	CompactionSourceLimitFactor int

	// CompactionTableSize limits the size of 'sorted tables' that compaction
	// generates. The limit for each level is calculated as:
	//
	//	CompactionTableSize * (CompactionTableSizeMultiplier ^ Level)
	//
	// The multiplier for each level can also be fine-tuned using
	// CompactionTableSizeMultiplierPerLevel.
	//
	// The default value is 2MiB.
	CompactionTableSize int

	// CompactionTableSizeMultiplier defines the multiplier for
	// CompactionTableSize.
	//
	// The default value is 1.
	CompactionTableSizeMultiplier float64

	// CompactionTableSizeMultiplierPerLevel defines the per-level multiplier
	// for CompactionTableSize. Use zero to skip a level.
	//
	// The default value is nil.
	CompactionTableSizeMultiplierPerLevel []float64

	// CompactionTotalSize limits the total size of 'sorted tables' for each
	// level. The limit for each level is calculated as:
	//
	//	CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level)
	//
	// The multiplier for each level can also be fine-tuned using
	// CompactionTotalSizeMultiplierPerLevel.
	//
	// The default value is 10MiB.
	CompactionTotalSize int

	// CompactionTotalSizeMultiplier defines the multiplier for
	// CompactionTotalSize.
	//
	// The default value is 10.
	CompactionTotalSizeMultiplier float64

	// CompactionTotalSizeMultiplierPerLevel defines the per-level multiplier
	// for CompactionTotalSize. Use zero to skip a level.
	//
	// The default value is nil.
	CompactionTotalSizeMultiplierPerLevel []float64

	// Comparer defines a total ordering over the space of []byte keys: a 'less
	// than' relationship. The same comparison algorithm must be used for reads
	// and writes over the lifetime of the DB.
	//
	// The default value uses the same ordering as bytes.Compare.
	Comparer comparer.Comparer

	// Compression defines the 'sorted table' block compression to use.
	//
	// The default value (DefaultCompression) uses snappy compression.
	Compression Compression

	// DisableBufferPool disables the use of util.BufferPool functionality.
	//
	// The default value is false.
	DisableBufferPool bool

	// DisableBlockCache disables the use of cache.Cache functionality on
	// 'sorted table' blocks.
	//
	// The default value is false.
	DisableBlockCache bool

	// DisableCompactionBackoff disables the compaction retry backoff.
	//
	// The default value is false.
	DisableCompactionBackoff bool

	// DisableLargeBatchTransaction disables the switch-to-transaction mode
	// on large batch writes. When not disabled, batch writes larger than
	// WriteBuffer will use a transaction.
	//
	// The default value is false.
	DisableLargeBatchTransaction bool

	// ErrorIfExist defines whether an error should be returned if the DB
	// already exists.
	//
	// The default value is false.
	ErrorIfExist bool

	// ErrorIfMissing defines whether an error should be returned if the DB is
	// missing. If false then the database will be created if missing, otherwise
	// an error will be returned.
	//
	// The default value is false.
	ErrorIfMissing bool

	// Filter defines the 'effective filter' to use. If defined, the
	// 'effective filter' will be used to generate per-table filter blocks.
	// The filter name is stored on disk.
	// During reads LevelDB will try to find a matching filter among the
	// 'effective filter' and the 'alternative filters'.
	//
	// Filter can be changed after a DB has been created. It is recommended
	// to put the old filter into the 'alternative filters' to mitigate the
	// lack of a filter during the transition period.
	//
	// A filter is used to reduce disk reads when looking for a specific key.
	//
	// The default value is nil.
	Filter filter.Filter

	// IteratorSamplingRate defines the approximate gap (in bytes) between read
	// sampling of an iterator. The samples will be used to determine when
	// compaction should be triggered.
	//
	// The default value is 1MiB.
	IteratorSamplingRate int

	// NoSync completely disables fsync.
	//
	// The default value is false.
	NoSync bool

	// NoWriteMerge disables write merging.
	//
	// The default value is false.
	NoWriteMerge bool

	// OpenFilesCacher provides the cache algorithm for open-files caching.
	// Specify NoCacher to disable caching.
	//
	// The default value is LRUCacher.
	OpenFilesCacher Cacher

	// OpenFilesCacheCapacity defines the capacity of the open-files cache.
	// Use -1 for zero; this has the same effect as specifying NoCacher
	// for OpenFilesCacher.
	//
	// The default value is 500.
	OpenFilesCacheCapacity int

	// ReadOnly, if true, opens the DB in read-only mode.
	//
	// The default value is false.
	ReadOnly bool

	// Strict defines the DB strict level.
	Strict Strict

	// WriteBuffer defines the maximum size of a 'memdb' before it is flushed
	// to a 'sorted table'. A 'memdb' is an in-memory DB backed by an on-disk
	// unsorted journal.
	//
	// LevelDB may hold up to two 'memdb's at the same time.
	//
	// The default value is 4MiB.
	WriteBuffer int

	// WriteL0PauseTrigger defines the number of 'sorted tables' at level-0
	// that will pause writes.
	//
	// The default value is 12.
	WriteL0PauseTrigger int

	// WriteL0SlowdownTrigger defines the number of 'sorted tables' at level-0
	// that will trigger a write slowdown.
	//
	// The default value is 8.
	WriteL0SlowdownTrigger int
}
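// A usage sketch with arbitrary illustrative values: a typical configuration
// sets a few fields and leaves the rest at zero, which the Get* accessors
// below resolve to the documented defaults.
//
//	o := &Options{
//		BlockCacheCapacity: 32 * MiB,
//		WriteBuffer:        16 * MiB,
//		Filter:             filter.NewBloomFilter(10),
//	}
//	_ = o.GetCompactionL0Trigger() // 4, i.e. DefaultCompactionL0Trigger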
// GetAltFilters returns the 'alternative filters', or nil.
func (o *Options) GetAltFilters() []filter.Filter {
	if o == nil {
		return nil
	}
	return o.AltFilters
}

// GetBlockCacher returns the block cacher; it returns nil if caching is
// disabled via NoCacher.
func (o *Options) GetBlockCacher() Cacher {
	if o == nil || o.BlockCacher == nil {
		return DefaultBlockCacher
	} else if o.BlockCacher == NoCacher {
		return nil
	}
	return o.BlockCacher
}

// GetBlockCacheCapacity returns the block cache capacity, falling back to
// DefaultBlockCacheCapacity; a negative value yields zero.
func (o *Options) GetBlockCacheCapacity() int {
	if o == nil || o.BlockCacheCapacity == 0 {
		return DefaultBlockCacheCapacity
	} else if o.BlockCacheCapacity < 0 {
		return 0
	}
	return o.BlockCacheCapacity
}

// GetBlockCacheEvictRemoved reports whether forced eviction of removed
// tables' blocks is enabled.
func (o *Options) GetBlockCacheEvictRemoved() bool {
	if o == nil {
		return false
	}
	return o.BlockCacheEvictRemoved
}

// GetBlockRestartInterval returns the block restart interval, falling back
// to DefaultBlockRestartInterval.
func (o *Options) GetBlockRestartInterval() int {
	if o == nil || o.BlockRestartInterval <= 0 {
		return DefaultBlockRestartInterval
	}
	return o.BlockRestartInterval
}

// GetBlockSize returns the block size, falling back to DefaultBlockSize.
func (o *Options) GetBlockSize() int {
	if o == nil || o.BlockSize <= 0 {
		return DefaultBlockSize
	}
	return o.BlockSize
}

// GetCompactionExpandLimit returns the expanded-compaction size limit for
// the given level.
func (o *Options) GetCompactionExpandLimit(level int) int {
	factor := DefaultCompactionExpandLimitFactor
	if o != nil && o.CompactionExpandLimitFactor > 0 {
		factor = o.CompactionExpandLimitFactor
	}
	return o.GetCompactionTableSize(level+1) * factor
}

// GetCompactionGPOverlaps returns the grandparent-overlaps limit for the
// given level.
func (o *Options) GetCompactionGPOverlaps(level int) int {
	factor := DefaultCompactionGPOverlapsFactor
	if o != nil && o.CompactionGPOverlapsFactor > 0 {
		factor = o.CompactionGPOverlapsFactor
	}
	return o.GetCompactionTableSize(level+2) * factor
}

// GetCompactionL0Trigger returns the level-0 compaction trigger, falling
// back to DefaultCompactionL0Trigger.
func (o *Options) GetCompactionL0Trigger() int {
	if o == nil || o.CompactionL0Trigger == 0 {
		return DefaultCompactionL0Trigger
	}
	return o.CompactionL0Trigger
}

// GetCompactionSourceLimit returns the compaction source size limit for the
// given level.
func (o *Options) GetCompactionSourceLimit(level int) int {
	factor := DefaultCompactionSourceLimitFactor
	if o != nil && o.CompactionSourceLimitFactor > 0 {
		factor = o.CompactionSourceLimitFactor
	}
	return o.GetCompactionTableSize(level+1) * factor
}

// GetCompactionTableSize returns the 'sorted table' size limit for the given
// level, applying the per-level or global multiplier to the base size.
func (o *Options) GetCompactionTableSize(level int) int {
	var (
		base = DefaultCompactionTableSize
		mult float64
	)
	if o != nil {
		if o.CompactionTableSize > 0 {
			base = o.CompactionTableSize
		}
		if level < len(o.CompactionTableSizeMultiplierPerLevel) && o.CompactionTableSizeMultiplierPerLevel[level] > 0 {
			mult = o.CompactionTableSizeMultiplierPerLevel[level]
		} else if o.CompactionTableSizeMultiplier > 0 {
			mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level))
		}
	}
	if mult == 0 {
		mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level))
	}
	return int(float64(base) * mult)
}
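// A worked example of the size formula, using a non-default multiplier
// chosen purely for illustration:
//
//	o := &Options{CompactionTableSizeMultiplier: 2.0}
//	_ = o.GetCompactionTableSize(2) // 2MiB * 2.0^2 = 8MiB
//
// With the default multiplier of 1.0, every level stays at 2MiB.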
// GetCompactionTotalSize returns the total 'sorted table' size limit for the
// given level, applying the per-level or global multiplier to the base size.
func (o *Options) GetCompactionTotalSize(level int) int64 {
	var (
		base = DefaultCompactionTotalSize
		mult float64
	)
	if o != nil {
		if o.CompactionTotalSize > 0 {
			base = o.CompactionTotalSize
		}
		if level < len(o.CompactionTotalSizeMultiplierPerLevel) && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 {
			mult = o.CompactionTotalSizeMultiplierPerLevel[level]
		} else if o.CompactionTotalSizeMultiplier > 0 {
			mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level))
		}
	}
	if mult == 0 {
		mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level))
	}
	return int64(float64(base) * mult)
}
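// A worked example with all defaults (CompactionTotalSize = 10MiB,
// CompactionTotalSizeMultiplier = 10.0), so the per-level total grows as
// 10MiB * 10^level; a nil *Options resolves to the defaults throughout.
//
//	var o *Options
//	_ = o.GetCompactionTotalSize(1) // 10MiB * 10^1 = 100MiB
//	_ = o.GetCompactionTotalSize(2) // 10MiB * 10^2 = 1GiB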
// GetComparer returns the comparer, falling back to comparer.DefaultComparer.
func (o *Options) GetComparer() comparer.Comparer {
	if o == nil || o.Comparer == nil {
		return comparer.DefaultComparer
	}
	return o.Comparer
}

// GetCompression returns the compression type, falling back to
// DefaultCompressionType for unset or out-of-range values.
func (o *Options) GetCompression() Compression {
	if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression {
		return DefaultCompressionType
	}
	return o.Compression
}

// GetDisableBufferPool reports whether the buffer pool is disabled.
func (o *Options) GetDisableBufferPool() bool {
	if o == nil {
		return false
	}
	return o.DisableBufferPool
}

// GetDisableBlockCache reports whether the block cache is disabled.
func (o *Options) GetDisableBlockCache() bool {
	if o == nil {
		return false
	}
	return o.DisableBlockCache
}

// GetDisableCompactionBackoff reports whether compaction retry backoff is
// disabled.
func (o *Options) GetDisableCompactionBackoff() bool {
	if o == nil {
		return false
	}
	return o.DisableCompactionBackoff
}

// GetDisableLargeBatchTransaction reports whether switch-to-transaction mode
// for large batches is disabled.
func (o *Options) GetDisableLargeBatchTransaction() bool {
	if o == nil {
		return false
	}
	return o.DisableLargeBatchTransaction
}

// GetErrorIfExist reports whether opening an existing DB is an error.
func (o *Options) GetErrorIfExist() bool {
	if o == nil {
		return false
	}
	return o.ErrorIfExist
}

// GetErrorIfMissing reports whether opening a missing DB is an error.
func (o *Options) GetErrorIfMissing() bool {
	if o == nil {
		return false
	}
	return o.ErrorIfMissing
}

// GetFilter returns the 'effective filter', or nil.
func (o *Options) GetFilter() filter.Filter {
	if o == nil {
		return nil
	}
	return o.Filter
}

// GetIteratorSamplingRate returns the iterator sampling rate, falling back
// to DefaultIteratorSamplingRate.
func (o *Options) GetIteratorSamplingRate() int {
	if o == nil || o.IteratorSamplingRate <= 0 {
		return DefaultIteratorSamplingRate
	}
	return o.IteratorSamplingRate
}

// GetNoSync reports whether fsync is disabled.
func (o *Options) GetNoSync() bool {
	if o == nil {
		return false
	}
	return o.NoSync
}

// GetNoWriteMerge reports whether write merging is disabled.
func (o *Options) GetNoWriteMerge() bool {
	if o == nil {
		return false
	}
	return o.NoWriteMerge
}

// GetOpenFilesCacher returns the open-files cacher; it returns nil if
// caching is disabled via NoCacher.
func (o *Options) GetOpenFilesCacher() Cacher {
	if o == nil || o.OpenFilesCacher == nil {
		return DefaultOpenFilesCacher
	}
	if o.OpenFilesCacher == NoCacher {
		return nil
	}
	return o.OpenFilesCacher
}

// GetOpenFilesCacheCapacity returns the open-files cache capacity, falling
// back to DefaultOpenFilesCacheCapacity; a negative value yields zero.
func (o *Options) GetOpenFilesCacheCapacity() int {
	if o == nil || o.OpenFilesCacheCapacity == 0 {
		return DefaultOpenFilesCacheCapacity
	} else if o.OpenFilesCacheCapacity < 0 {
		return 0
	}
	return o.OpenFilesCacheCapacity
}

// GetReadOnly reports whether the DB should be opened in read-only mode.
func (o *Options) GetReadOnly() bool {
	if o == nil {
		return false
	}
	return o.ReadOnly
}

// GetStrict reports whether the given strict flags are set, consulting
// DefaultStrict when no strict flags have been specified.
func (o *Options) GetStrict(strict Strict) bool {
	if o == nil || o.Strict == 0 {
		return DefaultStrict&strict != 0
	}
	return o.Strict&strict != 0
}

// GetWriteBuffer returns the write buffer size, falling back to
// DefaultWriteBuffer.
func (o *Options) GetWriteBuffer() int {
	if o == nil || o.WriteBuffer <= 0 {
		return DefaultWriteBuffer
	}
	return o.WriteBuffer
}

// GetWriteL0PauseTrigger returns the level-0 write pause trigger, falling
// back to DefaultWriteL0PauseTrigger.
func (o *Options) GetWriteL0PauseTrigger() int {
	if o == nil || o.WriteL0PauseTrigger == 0 {
		return DefaultWriteL0PauseTrigger
	}
	return o.WriteL0PauseTrigger
}

// GetWriteL0SlowdownTrigger returns the level-0 write slowdown trigger,
// falling back to DefaultWriteL0SlowdownTrigger.
func (o *Options) GetWriteL0SlowdownTrigger() int {
	if o == nil || o.WriteL0SlowdownTrigger == 0 {
		return DefaultWriteL0SlowdownTrigger
	}
	return o.WriteL0SlowdownTrigger
}
// ReadOptions holds the optional parameters for 'read operations'. The
// 'read operations' include Get, Find and NewIterator.
type ReadOptions struct {
	// DontFillCache defines whether block reads for this 'read operation'
	// should be cached. If false then the block will be cached. This does
	// not affect already cached blocks.
	//
	// The default value is false.
	DontFillCache bool

	// Strict will be OR'ed with the global DB 'strict level' unless
	// StrictOverride is present. Currently only StrictReader has an
	// effect here.
	Strict Strict
}
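// A sketch of a one-off read that bypasses the block cache and, via
// StrictOverride, clears the reader strictness for this call only:
//
//	ro := &ReadOptions{
//		DontFillCache: true,
//		Strict:        StrictOverride, // override with StrictReader unset
//	}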
// GetDontFillCache reports whether block reads should skip the cache.
func (ro *ReadOptions) GetDontFillCache() bool {
	if ro == nil {
		return false
	}
	return ro.DontFillCache
}

// GetStrict reports whether the given strict flags are set on these
// ReadOptions.
func (ro *ReadOptions) GetStrict(strict Strict) bool {
	if ro == nil {
		return false
	}
	return ro.Strict&strict != 0
}
// WriteOptions holds the optional parameters for 'write operations'. The
// 'write operations' include Write, Put and Delete.
type WriteOptions struct {
	// NoWriteMerge disables write merging.
	//
	// The default value is false.
	NoWriteMerge bool

	// Sync is whether to sync underlying writes from the OS buffer cache
	// through to actual disk, if applicable. Setting Sync can result in
	// slower writes.
	//
	// If false, and the machine crashes, then some recent writes may be lost.
	// Note that if it is just the process that crashes (and the machine does
	// not) then no writes will be lost.
	//
	// In other words, Sync being false has the same semantics as a write
	// system call. Sync being true means write followed by fsync.
	//
	// The default value is false.
	Sync bool
}
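// A minimal sketch: durability is opted into per write, trading latency for
// crash safety.
//
//	wo := &WriteOptions{Sync: true} // write followed by fsync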
// GetNoWriteMerge reports whether write merging is disabled for this write.
func (wo *WriteOptions) GetNoWriteMerge() bool {
	if wo == nil {
		return false
	}
	return wo.NoWriteMerge
}

// GetSync reports whether this write should be synced to disk.
func (wo *WriteOptions) GetSync() bool {
	if wo == nil {
		return false
	}
	return wo.Sync
}
// GetStrict resolves the effective strict flags from the DB-wide Options and
// the per-read ReadOptions: if the ReadOptions carry StrictOverride, they
// replace the global flags entirely; otherwise the two sets are OR'ed.
func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool {
	if ro.GetStrict(StrictOverride) {
		return ro.GetStrict(strict)
	}
	return o.GetStrict(strict) || ro.GetStrict(strict)
}
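// A sketch of the resolution rules; both calls are safe on nil receivers:
//
//	_ = GetStrict(nil, nil, StrictReader)                                  // true: DefaultStrict includes StrictReader
//	_ = GetStrict(nil, &ReadOptions{Strict: StrictOverride}, StrictReader) // false: override clears it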