// Copyright 2018 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"context"
	"fmt"
	"runtime/debug"

	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/invariants"
	"github.com/cockroachdb/pebble/internal/keyspan"
	"github.com/cockroachdb/pebble/internal/manifest"
	"github.com/cockroachdb/pebble/sstable"
)
// tableNewIters creates a new point and range-del iterator for the given file
// number.
//
// On success, the internalIterator is non-nil and must be closed; the
// FragmentIterator can be nil.
// TODO(radu): always return a non-nil FragmentIterator.
//
// On error, the iterators are nil.
//
// The only (non-test) implementation of tableNewIters is tableCacheContainer.newIters().
type tableNewIters func(
ctx context.Context,
file *manifest.FileMetadata,
opts *IterOptions,
internalOpts internalIterOpts,
) (internalIterator, keyspan.FragmentIterator, error)
// tableNewRangeDelIter takes a tableNewIters and returns a TableNewSpanIter
// for the rangedel iterator returned by tableNewIters.
func tableNewRangeDelIter(ctx context.Context, newIters tableNewIters) keyspan.TableNewSpanIter {
return func(file *manifest.FileMetadata, iterOptions keyspan.SpanIterOptions) (keyspan.FragmentIterator, error) {
iter, rangeDelIter, err := newIters(ctx, file, nil, internalIterOpts{})
if iter != nil {
_ = iter.Close()
}
if rangeDelIter == nil {
rangeDelIter = emptyKeyspanIter
}
return rangeDelIter, err
}
}
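// A minimal usage sketch of the adapter above (illustrative only; ctx,
// newIters, and file are assumed to be supplied by the caller and are not
// defined in this file):
//
//	newSpanIter := tableNewRangeDelIter(ctx, newIters)
//	rangeDelIter, err := newSpanIter(file, keyspan.SpanIterOptions{})
//	if err != nil {
//		return err
//	}
//	defer rangeDelIter.Close()
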
type internalIterOpts struct {
bytesIterated *uint64
bufferPool *sstable.BufferPool
stats *base.InternalIteratorStats
boundLimitedFilter sstable.BoundLimitedBlockPropertyFilter
}
// levelIter provides a merged view of the sstables in a level.
//
// levelIter is used during compaction and as part of the Iterator
// implementation. When used as part of the Iterator implementation, level
// iteration needs to "pause" at sstable boundaries if a range deletion
// tombstone is the source of that boundary. We know if a range tombstone is
// the smallest or largest key in a file because the kind will be
// InternalKeyKindRangeDeletion. If the boundary key is a range deletion
// tombstone, we materialize a fake entry to return from levelIter. This
// prevents mergingIter from advancing past the sstable until the sstable
// contains the smallest (or largest for reverse iteration) key in the merged
// heap. Note that mergingIter treats a range deletion tombstone returned by
// the point iterator as a no-op.
//
// SeekPrefixGE presents the need for a second type of pausing. If an sstable
// iterator returns "not found" for a SeekPrefixGE operation, we don't want to
// advance to the next sstable as the "not found" does not indicate that all of
// the keys in the sstable are less than the search key. Advancing to the next
// sstable would cause us to skip over range tombstones, violating
// correctness. Instead, SeekPrefixGE creates a synthetic boundary key with the
// kind InternalKeyKindRangeDeletion which will be used to pause the levelIter
// at the sstable until the mergingIter is ready to advance past it.
type levelIter struct {
// The context is stored here since (a) iterators are expected to be
// short-lived (since they pin sstables), (b) plumbing a context into every
// method is very painful, (c) they do not (yet) respect context
// cancellation and are only used for tracing.
ctx context.Context
logger Logger
comparer *Comparer
cmp Compare
split Split
// The lower/upper bounds for iteration as specified at creation or the most
// recent call to SetBounds.
lower []byte
upper []byte
// The iterator options for the currently open table. If
// tableOpts.{Lower,Upper}Bound are nil, the corresponding iteration boundary
// does not lie within the table bounds.
tableOpts IterOptions
// The LSM level this levelIter is initialized for.
level manifest.Level
// The keys to return when iterating past an sstable boundary and that
// boundary is a range deletion tombstone. The boundary could be smallest
// (i.e. arrived at with Prev), or largest (arrived at with Next).
smallestBoundary *InternalKey
largestBoundary *InternalKey
// combinedIterState may be set when a levelIter is used during user
// iteration. Although levelIter only iterates over point keys, it's also
// responsible for lazily constructing the combined range & point iterator
// when it observes a file containing range keys. If the combined iter
// state's initialized field is true, the iterator is already using the
// combined iterator, OR the iterator is not configured to use combined iteration. If
// it's false, the levelIter must set the `triggered` and `key` fields when
// the levelIter passes over a file containing range keys. See the
// lazyCombinedIter for more details.
combinedIterState *combinedIterState
// A synthetic boundary key to return when SeekPrefixGE finds an sstable
// which doesn't contain the search key, but which does contain range
// tombstones.
syntheticBoundary InternalKey
// The iter for the current file. It is nil under any of the following conditions:
// - files.Current() == nil
// - err != nil
// - some other constraint, like the bounds in opts, caused the file at index to not
// be relevant to the iteration.
iter internalIterator
// iterFile holds the current file. It is always equal to l.files.Current().
iterFile *fileMetadata
// filteredIter is an optional interface that may be implemented by internal
// iterators that perform filtering of keys. When a new file's iterator is
// opened, it's tested to see if it implements filteredIter. If it does,
// it's stored here to allow the level iterator to recognize when keys were
// omitted from iteration results due to filtering. This is important when a
// file contains range deletions that may delete keys from other files. The
// levelIter must not advance to the next file until the mergingIter has
// advanced beyond the file's bounds. See
// levelIterBoundaryContext.isIgnorableBoundaryKey.
filteredIter filteredIter
newIters tableNewIters
// When rangeDelIterPtr != nil, the caller requires that *rangeDelIterPtr must
// point to a range del iterator corresponding to the current file. When this
// iterator returns nil, *rangeDelIterPtr should also be set to nil. Whenever
// a non-nil internalIterator is placed in rangeDelIterPtr, a copy is placed
// in rangeDelIterCopy. This is done for the following special case:
// when this iterator returns nil because of exceeding the bounds, we don't
// close iter and *rangeDelIterPtr since we could reuse it in the next seek. But
// we need to set *rangeDelIterPtr to nil because of the aforementioned contract.
// This copy is used to revive the *rangeDelIterPtr in the case of reuse.
rangeDelIterPtr *keyspan.FragmentIterator
rangeDelIterCopy keyspan.FragmentIterator
files manifest.LevelIterator
err error
// Pointer into this level's entry in `mergingIterLevel::levelIterBoundaryContext`.
// We populate it with the corresponding bounds for the currently opened file. It is used for
// two purposes (described for forward iteration; the explanation for backward
// iteration is similar):
// - To limit the optimization that seeks lower-level iterators past keys shadowed by a range
// tombstone. Limiting this seek to the file largestUserKey is necessary since
// range tombstones are stored untruncated, while they only apply to keys within their
// containing file's boundaries. For a detailed example, see comment above `mergingIter`.
// - To constrain the tombstone to act-within the bounds of the sstable when checking
// containment. For forward iteration we need the smallestUserKey.
//
// An example is sstable bounds [c#8, g#12] containing a tombstone [b, i)#7.
// - When doing a SeekGE to user key X, the levelIter is at this sstable because X is either within
// the sstable bounds or earlier than the start of the sstable (and there is no sstable in
// between at this level). If X >= smallestUserKey, and the tombstone [b, i) contains X,
// it is correct to SeekGE the sstables at lower levels to min(g, i) (i.e., min of
// largestUserKey, tombstone.End) since any user key preceding min(g, i) must be covered by this
// tombstone (since it cannot have a version younger than this tombstone as it is at a lower
// level). And even if X = smallestUserKey or equal to the start user key of the tombstone,
// if the above conditions are satisfied we know that the internal keys corresponding to X at
// lower levels must have a version smaller than that in this file (again because of the level
// argument). So we don't need to use sequence numbers for this comparison.
// - When checking whether this tombstone deletes internal key X we know that the levelIter is at this
// sstable so (repeating the above) X.UserKey is either within the sstable bounds or earlier than the
// start of the sstable (and there is no sstable in between at this level).
// - X is at a lower level. If X.UserKey >= smallestUserKey, and the tombstone contains
// X.UserKey, we know X is deleted. This argument also works when X is a user key (we use
// it when seeking to test whether a user key is deleted).
// - X is at the same level. X must be within the sstable bounds of the tombstone so the
// X.UserKey >= smallestUserKey comparison is trivially true. In addition to the tombstone containing
// X we need to compare the sequence number of X and the tombstone (we don't need to look
// at how this tombstone is truncated to act-within the file bounds, which are InternalKeys,
// since X and the tombstone are from the same file).
//
// Iterating backwards has one more complication when checking whether a tombstone deletes
// internal key X at a lower level (the construction we do here also works for a user key X).
// Consider sstable bounds [c#8, g#InternalRangeDelSentinel] containing a tombstone [b, i)#7.
// If we are positioned at key g#10 at a lower sstable, the tombstone we will see is [b, i)#7,
// since the higher sstable is positioned at a key <= g#10. We should not use this tombstone
// to delete g#10. This requires knowing that the largestUserKey is a range delete sentinel,
// which we set in a separate bool below.
//
// These fields differ from the `*Boundary` fields in a few ways:
// - `*Boundary` is only populated when the iterator is positioned exactly on the sentinel key.
// - `*Boundary` can hold either the lower- or upper-bound, depending on the iterator direction.
// - `*Boundary` is not exposed to the next higher-level iterator, i.e., `mergingIter`.
boundaryContext *levelIterBoundaryContext
// internalOpts holds the internal iterator options to pass to the table
// cache when constructing new table iterators.
internalOpts internalIterOpts
// Scratch space for the obsolete keys filter, when there are no other block
// property filters specified. See the performance note where
// IterOptions.PointKeyFilters is declared.
filtersBuf [1]BlockPropertyFilter
// Disable invariant checks even if they are otherwise enabled. Used by tests
// which construct "impossible" situations (e.g. seeking to a key before the
// lower bound).
disableInvariants bool
}
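// To make the boundary pausing described above concrete (hypothetical keys):
// if a file's largest key is the range-deletion boundary "k#5,RANGEDEL", the
// levelIter surfaces that boundary as a synthetic entry (which mergingIter
// treats as a no-op point key) and stays on the file. This keeps the file,
// and in particular its range-del iterator, in scope until the boundary key
// becomes the smallest key in the merged heap, i.e. until the other levels
// have advanced to keys >= "k"; only then does Next move on to the following
// file.
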
// filteredIter is an additional interface implemented by iterators that may
// skip over point keys during iteration. The sstable.Iterator implements this
// interface.
type filteredIter interface {
// MaybeFilteredKeys may be called when an iterator is exhausted, indicating
// whether or not the iterator's last positioning method may have skipped
// any keys due to low-level filters.
//
// When an iterator is configured to use block-property filters, the
// low-level iterator may skip over blocks or whole sstables of keys.
// Implementations that implement skipping must implement this interface.
// Higher-level iterators require it to preserve invariants (eg, a levelIter
// used in a mergingIter must keep the file's range-del iterator open until
// the mergingIter has moved past the file's bounds, even if all of the
// file's point keys were filtered).
//
// MaybeFilteredKeys may always return false positives, that is, it may
// return true when no keys were filtered. It should only be called when the
// iterator is exhausted. It must never return false negatives when the
// iterator is exhausted.
MaybeFilteredKeys() bool
}
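// A sketch of the intended use of MaybeFilteredKeys (illustrative; only
// MaybeFilteredKeys itself is part of this file's API, the surrounding names
// are assumptions):
//
//	if exhausted && l.filteredIter != nil && l.filteredIter.MaybeFilteredKeys() {
//		// The file's point keys may have been filtered rather than being
//		// truly absent, so keep the file (and its range-del iterator) open
//		// by surfacing an ignorable boundary key instead of advancing to
//		// the next file.
//	}
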
// levelIter implements the base.InternalIterator interface.
var _ base.InternalIterator = (*levelIter)(nil)
// newLevelIter returns a levelIter. It is permissible to pass a nil split
// parameter if the caller is never going to call SeekPrefixGE.
func newLevelIter(
opts IterOptions,
comparer *Comparer,
newIters tableNewIters,
files manifest.LevelIterator,
level manifest.Level,
internalOpts internalIterOpts,
) *levelIter {
l := &levelIter{}
l.init(context.Background(), opts, comparer, newIters, files, level,
internalOpts)
return l
}
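// A minimal construction sketch (illustrative only; comparer, newIters, files,
// and level are assumed to be supplied by the caller, e.g. from the current
// version and the table cache):
//
//	li := newLevelIter(IterOptions{}, comparer, newIters, files, level,
//		internalIterOpts{})
//	defer li.Close()
//	for key, val := li.First(); key != nil; key, val = li.Next() {
//		_ = val // process the point key
//	}
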
func (l *levelIter) init(
ctx context.Context,
opts IterOptions,
comparer *Comparer,
newIters tableNewIters,
files manifest.LevelIterator,
level manifest.Level,
internalOpts internalIterOpts,
) {
l.ctx = ctx
l.err = nil
l.level = level
l.logger = opts.getLogger()
l.lower = opts.LowerBound
l.upper = opts.UpperBound
l.tableOpts.TableFilter = opts.TableFilter
l.tableOpts.PointKeyFilters = opts.PointKeyFilters
if len(opts.PointKeyFilters) == 0 {
l.tableOpts.PointKeyFilters = l.filtersBuf[:0:1]
}
l.tableOpts.UseL6Filters = opts.UseL6Filters
l.tableOpts.level = l.level
l.tableOpts.snapshotForHideObsoletePoints = opts.snapshotForHideObsoletePoints
l.comparer = comparer
l.cmp = comparer.Compare
l.split = comparer.Split
l.iterFile = nil
l.newIters = newIters
l.files = files
l.internalOpts = internalOpts
}
func (l *levelIter) initRangeDel(rangeDelIter *keyspan.FragmentIterator) {
l.rangeDelIterPtr = rangeDelIter
}
func (l *levelIter) initBoundaryContext(context *levelIterBoundaryContext) {
l.boundaryContext = context
}
func (l *levelIter) initCombinedIterState(state *combinedIterState) {
l.combinedIterState = state
}
func (l *levelIter) maybeTriggerCombinedIteration(file *fileMetadata, dir int) {
// If we encounter a file that contains range keys, we may need to
// trigger a switch to combined range-key and point-key iteration,
// if the *pebble.Iterator is configured for it. This switch is done
// lazily because range keys are intended to be rare, and
// constructing the range-key iterator substantially adds to the
// cost of iterator construction and seeking.
//
// If l.combinedIterState.initialized is already true, either the
// iterator is already using combined iteration or the iterator is not
// configured to observe range keys. Either way, there's nothing to do.
// If false, trigger the switch to combined iteration, using the
// file's bounds to seek the range-key iterator appropriately.
//
// We only need to trigger combined iteration if the file contains
// RangeKeySets: if there are only Unsets and Dels, the user will observe no
// range keys regardless. If this file has table stats available, they'll
// tell us whether the file has any RangeKeySets. Otherwise, we must
// fall back to assuming it does if HasRangeKeys=true.
if file != nil && file.HasRangeKeys && l.combinedIterState != nil && !l.combinedIterState.initialized &&
(l.upper == nil || l.cmp(file.SmallestRangeKey.UserKey, l.upper) < 0) &&
(l.lower == nil || l.cmp(file.LargestRangeKey.UserKey, l.lower) > 0) &&
(!file.StatsValid() || file.Stats.NumRangeKeySets > 0) {
// The file contains range keys, and we're not using combined iteration yet.
// Trigger a switch to combined iteration. It's possible that a switch has
// already been triggered if multiple levels encounter files containing
// range keys while executing a single mergingIter operation. In this case,
// we need to compare the existing key recorded to l.combinedIterState.key,
// adjusting it if our key is smaller (forward iteration) or larger
// (backward iteration) than the existing key.
//
// These key comparisons are only required during a single high-level
// iterator operation. When the high-level iter op completes,
// initialized will be true, and future calls to this function will be
// no-ops.
switch dir {
case +1:
if !l.combinedIterState.triggered {
l.combinedIterState.triggered = true
l.combinedIterState.key = file.SmallestRangeKey.UserKey
} else if l.cmp(l.combinedIterState.key, file.SmallestRangeKey.UserKey) > 0 {
l.combinedIterState.key = file.SmallestRangeKey.UserKey
}
case -1:
if !l.combinedIterState.triggered {
l.combinedIterState.triggered = true
l.combinedIterState.key = file.LargestRangeKey.UserKey
} else if l.cmp(l.combinedIterState.key, file.LargestRangeKey.UserKey) < 0 {
l.combinedIterState.key = file.LargestRangeKey.UserKey
}
}
}
}
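// To make the trigger bookkeeping above concrete (hypothetical keys): during a
// single forward operation, suppose a levelIter on L3 passes a file whose
// SmallestRangeKey is "m" while triggered is still false; it sets
// triggered=true and key="m". If a levelIter on L5 then passes a file whose
// SmallestRangeKey is "f", the comparison above lowers key to "f", so the
// lazily constructed range-key iterator is seeded at the earliest range key
// the operation could surface. Once the operation completes and initialized
// becomes true, later calls to this function are no-ops.
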
func (l *levelIter) findFileGE(key []byte, flags base.SeekGEFlags) *fileMetadata {
// Find the earliest file whose largest key is >= key.
// NB: if flags.TrySeekUsingNext()=true, the levelIter must respect it. If
// the levelIter is positioned at the key P, it must return a key ≥ P. If
// used within a merging iterator, the merging iterator will depend on the
// levelIter only moving forward to maintain heap invariants.
// Ordinarily we seek the LevelIterator using SeekGE. In some instances, we
// Next instead. In other instances, we try Next-ing first, falling back to
// seek:
// a) flags.TrySeekUsingNext(): The top-level Iterator knows we're seeking
// to a key later than the current iterator position. We don't know how
// much later the seek key is, so it's possible there are many sstables
// between the current position and the seek key. However in most real-
// world use cases, the seek key is likely to be nearby. Rather than
// performing a log(N) seek through the file metadata, we next a few
// times from our existing location. If we don't find a file whose
// largest is >= key within a few nexts, we fall back to seeking.
//
// Note that in this case, the file returned by findFileGE may be
// different than the file returned by a raw binary search (eg, when
// TrySeekUsingNext=false). This is possible because the most recent
// positioning operation may have already determined that previous
// files' keys that are ≥ key are all deleted. This information is
// encoded within the iterator's current iterator position and is
// unavailable to a fresh binary search.
//
// b) flags.RelativeSeek(): The merging iterator decided to re-seek this
// level according to a range tombstone. When lazy combined iteration
// is enabled, the level iterator is responsible for watching for
// files containing range keys and triggering the switch to combined
// iteration when such a file is observed. If a range deletion was
// observed in a higher level causing the merging iterator to seek the
// level to the range deletion's end key, we need to check whether all
// of the files between the old position and the new position contain
// any range keys.
//
// In this scenario, we don't seek the LevelIterator and instead we
// Next it, one file at a time, checking each for range keys. The
// merging iterator sets this flag to inform us that we're moving
// forward relative to the existing position and that we must examine
// each intermediate sstable's metadata for lazy-combined iteration.
// In this case, we only Next and never Seek. We set nextsUntilSeek=-1
// to signal this intention.
//
// NB: At most one of flags.RelativeSeek() and flags.TrySeekUsingNext() may
// be set, because when the merging iterator performs a relative re-seek it
// explicitly sets only the RelativeSeek flag.
var nextsUntilSeek int
var nextInsteadOfSeek bool
if flags.TrySeekUsingNext() {
nextInsteadOfSeek = true
nextsUntilSeek = 4 // arbitrary
}
if flags.RelativeSeek() && l.combinedIterState != nil && !l.combinedIterState.initialized {
nextInsteadOfSeek = true
nextsUntilSeek = -1
}
var m *fileMetadata
if nextInsteadOfSeek {
m = l.iterFile
} else {
m = l.files.SeekGE(l.cmp, key)
}
// The below loop has a bit of an unusual organization. There are several
// conditions under which we need to Next to a later file. If none of those
// conditions are met, the file in `m` is okay to return. The loop body is
// structured with a series of if statements, each of which may continue the
// loop to the next file. If none of the statements are met, the end of the
// loop body is a break.
for m != nil {
if m.HasRangeKeys {
l.maybeTriggerCombinedIteration(m, +1)
// Some files may only contain range keys, which we can skip.
// NB: HasPointKeys=true if the file contains any points or range
// deletions (which delete points).
if !m.HasPointKeys {
m = l.files.Next()
continue
}
}
// This file has point keys.
//
// However, there are a couple reasons why `m` may not be positioned ≥
// `key` yet:
//
// 1. If SeekGE(key) landed on a file containing range keys, the file
// may contain range keys ≥ `key` but no point keys ≥ `key`.
// 2. When nexting instead of seeking, we must check to see whether
// we've nexted sufficiently far, or we need to next again.
//
// If the file does not contain point keys ≥ `key`, next to continue
// looking for a file that does.
if (m.HasRangeKeys || nextInsteadOfSeek) && l.cmp(m.LargestPointKey.UserKey, key) < 0 {
// If nextInsteadOfSeek is set and nextsUntilSeek has reached zero, the
// iterator has been nexting, hoping to discover the relevant file
// without seeking, and has now exhausted its allotted nexts. Fall back
// to seeking to the sought key.
if nextInsteadOfSeek && nextsUntilSeek == 0 {
nextInsteadOfSeek = false
m = l.files.SeekGE(l.cmp, key)
continue
} else if nextsUntilSeek > 0 {
nextsUntilSeek--
}
m = l.files.Next()
continue
}
// This file has a point key bound ≥ `key`. But the largest point key
// bound may still be a range deletion sentinel, which is exclusive. In
// this case, the file doesn't actually contain any point keys equal to
// `key`. We next to keep searching for a file that actually contains
// point keys ≥ key.
//
// Additionally, this prevents loading untruncated range deletions from
// a table which can't possibly contain the target key and is required
// for correctness by mergingIter.SeekGE (see the comment in that
// function).
if m.LargestPointKey.IsExclusiveSentinel() && l.cmp(m.LargestPointKey.UserKey, key) == 0 {
m = l.files.Next()
continue
}
// This file contains point keys ≥ `key`. Break and return it.
break
}
return m
}
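// A short trace of the TrySeekUsingNext path above (hypothetical file bounds,
// purely illustrative): suppose the level's files have largest point keys c,
// f, m and t, and the iterator is currently positioned on the file ending at
// c. A SeekGE("k") with TrySeekUsingNext set nexts past the files ending at c
// and f (using 2 of the 4 allotted nexts) and stops on the file ending at m,
// never paying for the log(N) metadata seek.
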
func (l *levelIter) findFileLT(key []byte, flags base.SeekLTFlags) *fileMetadata {
// Find the last file whose smallest key is < ikey.
// Ordinarily we seek the LevelIterator using SeekLT.
//
// When lazy combined iteration is enabled, there's a complication. The
// level iterator is responsible for watching for files containing range
// keys and triggering the switch to combined iteration when such a file is
// observed. If a range deletion was observed in a higher level causing the
// merging iterator to seek the level to the range deletion's start key, we
// need to check whether all of the files between the old position and the
// new position contain any range keys.
//
// In this scenario, we don't seek the LevelIterator and instead we Prev it,
// one file at a time, checking each for range keys.
prevInsteadOfSeek := flags.RelativeSeek() && l.combinedIterState != nil && !l.combinedIterState.initialized
var m *fileMetadata
if prevInsteadOfSeek {
m = l.iterFile
} else {
m = l.files.SeekLT(l.cmp, key)
}
// The below loop has a bit of an unusual organization. There are several
// conditions under which we need to Prev to a previous file. If none of
// those conditions are met, the file in `m` is okay to return. The loop
// body is structured with a series of if statements, each of which may
// continue the loop to the previous file. If none of the statements are
// met, the end of the loop body is a break.
for m != nil {
if m.HasRangeKeys {
l.maybeTriggerCombinedIteration(m, -1)
// Some files may only contain range keys, which we can skip.
// NB: HasPointKeys=true if the file contains any points or range
// deletions (which delete points).
if !m.HasPointKeys {
m = l.files.Prev()
continue
}
}
// This file has point keys.
//
// However, there are a couple reasons why `m` may not be positioned <
// `key` yet:
//
// 1. If SeekLT(key) landed on a file containing range keys, the file
// may contain range keys < `key` but no point keys < `key`.
// 2. When preving instead of seeking, we must check to see whether
// we've preved sufficiently far, or we need to prev again.
//
// If the file does not contain point keys < `key`, prev to continue
// looking for a file that does.
if (m.HasRangeKeys || prevInsteadOfSeek) && l.cmp(m.SmallestPointKey.UserKey, key) >= 0 {
m = l.files.Prev()
continue
}
// This file contains point keys < `key`. Break and return it.
break
}
return m
}
// Init the iteration bounds for the current table. Returns -1 if the table
// lies fully before the lower bound, +1 if the table lies fully after the
// upper bound, and 0 if the table overlaps the iteration bounds.
func (l *levelIter) initTableBounds(f *fileMetadata) int {
l.tableOpts.LowerBound = l.lower
if l.tableOpts.LowerBound != nil {
if l.cmp(f.LargestPointKey.UserKey, l.tableOpts.LowerBound) < 0 {
// The largest key in the sstable is smaller than the lower bound.
return -1
}
if l.cmp(l.tableOpts.LowerBound, f.SmallestPointKey.UserKey) <= 0 {
// The lower bound is smaller or equal to the smallest key in the
// table. Iteration within the table does not need to check the lower
// bound.
l.tableOpts.LowerBound = nil
}
}
l.tableOpts.UpperBound = l.upper
if l.tableOpts.UpperBound != nil {
if l.cmp(f.SmallestPointKey.UserKey, l.tableOpts.UpperBound) >= 0 {
// The smallest key in the sstable is greater than or equal to the upper
// bound.
return 1
}
if l.cmp(l.tableOpts.UpperBound, f.LargestPointKey.UserKey) > 0 {
// The upper bound is greater than the largest key in the
// table. Iteration within the table does not need to check the upper
// bound. NB: tableOpts.UpperBound is exclusive and f.LargestPointKey is
// inclusive.
l.tableOpts.UpperBound = nil
}
}
return 0
}
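// A worked example of the return values above (hypothetical bounds): for a
// table with point-key bounds [d, m] and iteration bounds lower=a, upper=z,
// initTableBounds returns 0 and clears both tableOpts bounds, since [d, m]
// lies strictly inside [a, z). With lower=p the table lies entirely before
// the lower bound and the result is -1; with upper=b the table lies at or
// beyond the exclusive upper bound and the result is +1.
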
type loadFileReturnIndicator int8
const (
noFileLoaded loadFileReturnIndicator = iota
fileAlreadyLoaded
newFileLoaded
)
func (l *levelIter) loadFile(file *fileMetadata, dir int) loadFileReturnIndicator {
l.smallestBoundary = nil
l.largestBoundary = nil
if l.boundaryContext != nil {
l.boundaryContext.isSyntheticIterBoundsKey = false
l.boundaryContext.isIgnorableBoundaryKey = false
}
if l.iterFile == file {
if l.err != nil {
return noFileLoaded
}
if l.iter != nil {
// We don't bother comparing the file bounds with the iteration bounds when we have
// an already open iterator. It is possible that the iter may not be relevant given the
// current iteration bounds, but it knows those bounds, so it will enforce them.
if l.rangeDelIterPtr != nil {
*l.rangeDelIterPtr = l.rangeDelIterCopy
}
// There are a few reasons we might not have triggered combined
// iteration yet, even though we already had `file` open.
// 1. If the bounds changed, we might have previously avoided
// switching to combined iteration because the bounds excluded
// the range keys contained in this file.
// 2. If an existing iterator was reconfigured to iterate over range
// keys (eg, using SetOptions), then we wouldn't have triggered
// the switch to combined iteration yet.
l.maybeTriggerCombinedIteration(file, dir)
return fileAlreadyLoaded
}
// We were already at file, but don't have an iterator, probably because the file was
// beyond the iteration bounds. It may still be, but it is also possible that the bounds
// have changed. We handle that below.
}
// Close both iter and rangeDelIterPtr. While mergingIter knows about
// rangeDelIterPtr, it can't call Close() on it because it does not know
// when the levelIter will switch it. Note that levelIter.Close() can be
// called multiple times.
if err := l.Close(); err != nil {
return noFileLoaded
}
for {
l.iterFile = file
if file == nil {
return noFileLoaded
}
l.maybeTriggerCombinedIteration(file, dir)
if !file.HasPointKeys {
switch dir {
case +1:
file = l.files.Next()
continue
case -1:
file = l.files.Prev()
continue
}
}
switch l.initTableBounds(file) {
case -1:
// The largest key in the sstable is smaller than the lower bound.
if dir < 0 {
return noFileLoaded
}
file = l.files.Next()
continue
case +1:
// The smallest key in the sstable is greater than or equal to the upper
// bound.
if dir > 0 {
return noFileLoaded
}
file = l.files.Prev()
continue
}
var rangeDelIter keyspan.FragmentIterator
var iter internalIterator
iter, rangeDelIter, l.err = l.newIters(l.ctx, l.iterFile, &l.tableOpts, l.internalOpts)
l.iter = iter
if l.err != nil {
return noFileLoaded
}
if rangeDelIter != nil {
if fi, ok := iter.(filteredIter); ok {
l.filteredIter = fi
} else {
l.filteredIter = nil
}
} else {
l.filteredIter = nil
}
if l.rangeDelIterPtr != nil {
*l.rangeDelIterPtr = rangeDelIter
l.rangeDelIterCopy = rangeDelIter
} else if rangeDelIter != nil {
rangeDelIter.Close()
}
if l.boundaryContext != nil {
l.boundaryContext.smallestUserKey = file.Smallest.UserKey
l.boundaryContext.largestUserKey = file.Largest.UserKey
l.boundaryContext.isLargestUserKeyExclusive = file.Largest.IsExclusiveSentinel()
}
return newFileLoaded
}
}
// In race builds we verify that the keys returned by levelIter lie within
// [lower,upper).
func (l *levelIter) verify(key *InternalKey, val base.LazyValue) (*InternalKey, base.LazyValue) {
// Note that invariants.Enabled is a compile time constant, which means the
// block of code will be compiled out of normal builds making this method
// eligible for inlining. Do not change this to use a variable.
if invariants.Enabled && !l.disableInvariants && key != nil {
// We allow returning a boundary key that is outside of the lower/upper
// bounds as such keys are always range tombstones which will be skipped by
// the Iterator.
if l.lower != nil && key != l.smallestBoundary && l.cmp(key.UserKey, l.lower) < 0 {
l.logger.Fatalf("levelIter %s: lower bound violation: %s < %s\n%s", l.level, key, l.lower, debug.Stack())
}
if l.upper != nil && key != l.largestBoundary && l.cmp(key.UserKey, l.upper) > 0 {
l.logger.Fatalf("levelIter %s: upper bound violation: %s > %s\n%s", l.level, key, l.upper, debug.Stack())
}
}
return key, val
}
func (l *levelIter) SeekGE(key []byte, flags base.SeekGEFlags) (*InternalKey, base.LazyValue) {
l.err = nil // clear cached iteration error
if l.boundaryContext != nil {
l.boundaryContext.isSyntheticIterBoundsKey = false
l.boundaryContext.isIgnorableBoundaryKey = false
}
// NB: the top-level Iterator has already adjusted key based on
// IterOptions.LowerBound.
loadFileIndicator := l.loadFile(l.findFileGE(key, flags), +1)
if loadFileIndicator == noFileLoaded {
return nil, base.LazyValue{}
}
if loadFileIndicator == newFileLoaded {
// File changed, so l.iter has changed, and that iterator is not
// positioned appropriately.
flags = flags.DisableTrySeekUsingNext()
}
if ikey, val := l.iter.SeekGE(key, flags); ikey != nil {
return l.verify(ikey, val)
}
return l.verify(l.skipEmptyFileForward())
}
func (l *levelIter) SeekPrefixGE(
prefix, key []byte, flags base.SeekGEFlags,
) (*base.InternalKey, base.LazyValue) {
l.err = nil // clear cached iteration error
if l.boundaryContext != nil {
l.boundaryContext.isSyntheticIterBoundsKey = false
l.boundaryContext.isIgnorableBoundaryKey = false
}
// NB: the top-level Iterator has already adjusted key based on
// IterOptions.LowerBound.
loadFileIndicator := l.loadFile(l.findFileGE(key, flags), +1)
if loadFileIndicator == noFileLoaded {
return nil, base.LazyValue{}
}
if loadFileIndicator == newFileLoaded {
// File changed, so l.iter has changed, and that iterator is not
// positioned appropriately.
flags = flags.DisableTrySeekUsingNext()
}
if key, val := l.iter.SeekPrefixGE(prefix, key, flags); key != nil {
return l.verify(key, val)
}
// When SeekPrefixGE returns nil, we have not necessarily reached the end of
// the sstable. All we know is that a key with prefix does not exist in the
// current sstable. We do know that the key lies within the bounds of the
// table as findFileGE found the table where key <= meta.Largest. We return
// the table's bound with isIgnorableBoundaryKey set.
if l.rangeDelIterPtr != nil && *l.rangeDelIterPtr != nil {
if l.tableOpts.UpperBound != nil {
l.syntheticBoundary.UserKey = l.tableOpts.UpperBound
l.syntheticBoundary.Trailer = InternalKeyRangeDeleteSentinel
l.largestBoundary = &l.syntheticBoundary
if l.boundaryContext != nil {
l.boundaryContext.isSyntheticIterBoundsKey = true
l.boundaryContext.isIgnorableBoundaryKey = false
}
return l.verify(l.largestBoundary, base.LazyValue{})
}
// Return the file's largest bound, ensuring this file stays open until
// the mergingIter advances beyond the file's bounds. We set
// isIgnorableBoundaryKey to signal that the actual key returned should
// be ignored, and does not represent a real key in the database.
l.largestBoundary = &l.iterFile.LargestPointKey
if l.boundaryContext != nil {
l.boundaryContext.isSyntheticIterBoundsKey = false
l.boundaryContext.isIgnorableBoundaryKey = true
}
return l.verify(l.largestBoundary, base.LazyValue{})
}
// It is possible that we are here because bloom filter matching failed. In
// that case it is likely that all keys matching the prefix are wholly
// within the current file and cannot be in the subsequent file. In that
// case we don't want to go to the next file, since loading and seeking in
// there has some cost. Additionally, for sparse key spaces, loading the
// next file will defeat the optimization for the next SeekPrefixGE that is
// called with flags.TrySeekUsingNext(), since for sparse key spaces it is
// likely that the next key will also be contained in the current file.
var n int
if l.split != nil {
// If the split function is specified, calculate the prefix length accordingly.
n = l.split(l.iterFile.LargestPointKey.UserKey)
} else {
// If the split function is not specified, the entire key is used as the
// prefix. This case can occur when getIter uses SeekPrefixGE.
n = len(l.iterFile.LargestPointKey.UserKey)
}
if l.cmp(prefix, l.iterFile.LargestPointKey.UserKey[:n]) < 0 {
return nil, base.LazyValue{}
}
return l.verify(l.skipEmptyFileForward())
}
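// To illustrate the prefix comparison at the end of SeekPrefixGE above
// (hypothetical keys): if the file's largest point key is "apple@7" and the
// split function returns the length of the portion before '@', then
// n = len("apple"); a search prefix "apax" compares less than "apple", so the
// levelIter stays on the current file and returns nil rather than loading the
// next file.
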
func (l *levelIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, base.LazyValue) {
l.err = nil // clear cached iteration error
if l.boundaryContext != nil {
l.boundaryContext.isSyntheticIterBoundsKey = false
l.boundaryContext.isIgnorableBoundaryKey = false
}
// NB: the top-level Iterator has already adjusted key based on
// IterOptions.UpperBound.
if l.loadFile(l.findFileLT(key, flags), -1) == noFileLoaded {
return nil, base.LazyValue{}
}
if key, val := l.iter.SeekLT(key, flags); key != nil {
return l.verify(key, val)
}
return l.verify(l.skipEmptyFileBackward())
}
func (l *levelIter) First() (*InternalKey, base.LazyValue) {
l.err = nil // clear cached iteration error
if l.boundaryContext != nil {
l.boundaryContext.isSyntheticIterBoundsKey = false
l.boundaryContext.isIgnorableBoundaryKey = false
}
// NB: the top-level Iterator will call SeekGE if IterOptions.LowerBound is
// set.
if l.loadFile(l.files.First(), +1) == noFileLoaded {
return nil, base.LazyValue{}
}
if key, val := l.iter.First(); key != nil {
return l.verify(key, val)
}
return l.verify(l.skipEmptyFileForward())
}
func (l *levelIter) Last() (*InternalKey, base.LazyValue) {
l.err = nil // clear cached iteration error
if l.boundaryContext != nil {
l.boundaryContext.isSyntheticIterBoundsKey = false
l.boundaryContext.isIgnorableBoundaryKey = false
}
// NB: the top-level Iterator will call SeekLT if IterOptions.UpperBound is
// set.
if l.loadFile(l.files.Last(), -1) == noFileLoaded {
return nil, base.LazyValue{}
}
if key, val := l.iter.Last(); key != nil {
return l.verify(key, val)
}
return l.verify(l.skipEmptyFileBackward())
}
func (l *levelIter) Next() (*InternalKey, base.LazyValue) {
if l.err != nil || l.iter == nil {
return nil, base.LazyValue{}
}
if l.boundaryContext != nil {
l.boundaryContext.isSyntheticIterBoundsKey = false
l.boundaryContext.isIgnorableBoundaryKey = false
}
switch {
case l.largestBoundary != nil:
if l.tableOpts.UpperBound != nil {
// The UpperBound was within this file, so don't load the next
// file. We leave the largestBoundary unchanged so that subsequent
// calls to Next() stay at this file. If a Seek/First/Last call is
// made and this file continues to be relevant, loadFile() will
// set the largestBoundary to nil.
if l.rangeDelIterPtr != nil {
*l.rangeDelIterPtr = nil
}
return nil, base.LazyValue{}
}
// We're stepping past the boundary key, so now we can load the next file.
if l.loadFile(l.files.Next(), +1) != noFileLoaded {
if key, val := l.iter.First(); key != nil {
return l.verify(key, val)
}
return l.verify(l.skipEmptyFileForward())
}
return nil, base.LazyValue{}
default:
// Reset the smallest boundary since we're moving away from it.
l.smallestBoundary = nil
if key, val := l.iter.Next(); key != nil {
return l.verify(key, val)
}
}
return l.verify(l.skipEmptyFileForward())
}
func (l *levelIter) NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) {
if l.err != nil || l.iter == nil {
return nil, base.LazyValue{}
}
if l.boundaryContext != nil {
l.boundaryContext.isSyntheticIterBoundsKey = false
l.boundaryContext.isIgnorableBoundaryKey = false
}
switch {
case l.largestBoundary != nil:
if l.tableOpts.UpperBound != nil {
// The UpperBound was within this file, so don't load the next
// file. We leave the largestBoundary unchanged so that subsequent
// calls to Next() stay at this file. If a Seek/First/Last call is
// made and this file continues to be relevant, loadFile() will
// set the largestBoundary to nil.
if l.rangeDelIterPtr != nil {
*l.rangeDelIterPtr = nil
}
return nil, base.LazyValue{}
}
// We're stepping past the boundary key, so we need to load a later
// file.
default:
// Reset the smallest boundary since we're moving away from it.
l.smallestBoundary = nil
if key, val := l.iter.NextPrefix(succKey); key != nil {
return l.verify(key, val)
}
// Fall through to seeking.
}
// Seek the manifest level iterator using TrySeekUsingNext=true and
// RelativeSeek=true so that we take advantage of the knowledge that
// `succKey` can only be contained in later files.
metadataSeekFlags := base.SeekGEFlagsNone.EnableTrySeekUsingNext().EnableRelativeSeek()
if l.loadFile(l.findFileGE(succKey, metadataSeekFlags), +1) != noFileLoaded {
// NB: The SeekGE on the file's iterator must not set TrySeekUsingNext,
// because l.iter is unpositioned.
if key, val := l.iter.SeekGE(succKey, base.SeekGEFlagsNone); key != nil {
return l.verify(key, val)
}
return l.verify(l.skipEmptyFileForward())
}
return nil, base.LazyValue{}
}
func (l *levelIter) Prev() (*InternalKey, base.LazyValue) {
if l.err != nil || l.iter == nil {
return nil, base.LazyValue{}
}
if l.boundaryContext != nil {
l.boundaryContext.isSyntheticIterBoundsKey = false
l.boundaryContext.isIgnorableBoundaryKey = false
}
switch {
case l.smallestBoundary != nil:
if l.tableOpts.LowerBound != nil {
// The LowerBound was within this file, so don't load the previous
// file. We leave the smallestBoundary unchanged so that
// subsequent calls to Prev() stay at this file. If a
// Seek/First/Last call is made and this file continues to be
// relevant, loadFile() will set the smallestBoundary to nil.
if l.rangeDelIterPtr != nil {
*l.rangeDelIterPtr = nil
}
return nil, base.LazyValue{}
}
// We're stepping past the boundary key, so now we can load the prev file.
if l.loadFile(l.files.Prev(), -1) != noFileLoaded {
if key, val := l.iter.Last(); key != nil {
return l.verify(key, val)
}
return l.verify(l.skipEmptyFileBackward())
}
return nil, base.LazyValue{}
default:
// Reset the largest boundary since we're moving away from it.