/*
* Copyright (C) 2017-present ScyllaDB
*/
/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/
#pragma once
#include "utils/assert.hh"
#include <vector>
#include "row_cache.hh"
#include "mutation/mutation_fragment.hh"
#include "query-request.hh"
#include "partition_snapshot_row_cursor.hh"
#include "read_context.hh"
#include "readers/delegating_v2.hh"
#include "clustering_key_filter.hh"
namespace cache {
extern logging::logger clogger;
class cache_mutation_reader final : public mutation_reader::impl {
enum class state {
before_static_row,
// Invariants:
// - position_range(_lower_bound, _upper_bound) covers all not yet emitted positions from the current range
// - if _next_row has valid iterators:
// - _next_row points to the nearest row in cache >= _lower_bound
// - _next_row_in_range = _next_row.position() < _upper_bound
// - if _next_row doesn't have valid iterators, it has no meaning.
reading_from_cache,
// Starts reading from underlying reader.
// The range to read is position_range(_lower_bound, min(_next_row.position(), _upper_bound)).
// Invariants:
// - _next_row_in_range = _next_row.position() < _upper_bound
move_to_underlying,
// Invariants:
// - Upper bound of the read is *_underlying_upper_bound
// - _next_row_in_range = _next_row.position() < _upper_bound
// - _last_row points at a direct predecessor of the next row which is going to be read.
// Used for populating continuity.
// - _population_range_starts_before_all_rows is set accordingly
// - _underlying is engaged and fast-forwarded
reading_from_underlying,
end_of_stream
};
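// A rough sketch of the state transitions, as implemented by fill_buffer(), do_fill_buffer()
// and read_from_underlying() below (summary only, not an exhaustive list):
//
//   before_static_row       -> reading_from_cache       (after the static row is handled)
//   before_static_row       -> end_of_stream            (no clustering ranges to read)
//   reading_from_cache      -> move_to_underlying       (start_reading_from_underlying(), on a discontinuity)
//   move_to_underlying      -> reading_from_underlying  (underlying reader engaged and fast-forwarded)
//   reading_from_underlying -> reading_from_cache       (underlying range exhausted, continuity recorded)
//   reading_from_cache      -> end_of_stream            (all clustering ranges exhausted)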
partition_snapshot_ptr _snp;
query::clustering_key_filter_ranges _ck_ranges; // Query schema domain, reversed reads use native order
query::clustering_row_ranges::const_iterator _ck_ranges_curr; // Query schema domain
query::clustering_row_ranges::const_iterator _ck_ranges_end; // Query schema domain
lsa_manager _lsa_manager;
partition_snapshot_row_weakref _last_row; // Table schema domain
// Holds the lower bound of a position range which hasn't been processed yet.
// Only rows with positions < _lower_bound have been emitted, and only
// range_tombstone_changes with positions <= _lower_bound.
//
// Invariant: !_lower_bound.is_clustering_row()
position_in_partition _lower_bound; // Query schema domain
// Invariant: !_upper_bound.is_clustering_row()
position_in_partition_view _upper_bound; // Query schema domain
std::optional<position_in_partition> _underlying_upper_bound; // Query schema domain
// cache_mutation_reader may be constructed either
// with a read_context&, in which case the read_context
// is owned externally by the caller and
// _read_context_holder is disengaged;
// or with a std::unique_ptr<read_context>,
// in which case the reader assumes ownership of the read_context
// and is responsible for closing it.
// In that case _read_context_holder is engaged
// and _read_context references its content.
std::unique_ptr<read_context> _read_context_holder;
read_context& _read_context;
partition_snapshot_row_cursor _next_row;
// Holds the currently active range tombstone of the output mutation fragment stream.
// While producing the stream, at any given time _current_tombstone applies to a
// key range which extends at least up to _lower_bound. When consuming the next interval,
// which will advance _lower_bound further, be it from underlying or from cache,
// a decision is made whether the range tombstone of that interval is the same as
// the current one. If it is different, a range_tombstone_change is emitted
// at the old _lower_bound value (the start of the next interval).
tombstone _current_tombstone;
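// Illustrative example (hypothetical positions and tombstones): a produced stream of
//   rtc(@3, t1), row(5), rtc(@7, t2), row(8), rtc(@9, null)
// means t1 covers positions [3, 7), t2 covers [7, 9), and nothing is covered past 9.
// After emitting rtc(@7, t2), _current_tombstone == t2 until the next range_tombstone_change.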
state _state = state::before_static_row;
bool _next_row_in_range = false;
bool _has_rt = false;
// True iff the current population interval starts at before_all_clustered_rows
// and _last_row is unset (and the read isn't reversed).
//
// Rationale: in the "most general" step of cache population,
// we mark the `(_last_row, ...]` range as continuous, which can involve doing something to `_last_row`.
// But when populating the range `(before_all_clustered_rows, ...)`,
// a rows_entry at `before_all_clustered_rows` needn't exist.
// Thus this case needs special treatment which doesn't involve `_last_row`,
// and for that it has to be recognized (via this flag).
//
// We cannot just look at _lower_bound, because the emission of range tombstones moves _lower_bound forward,
// and since we mark clustering intervals as continuous when consuming a clustering_row, that would prevent
// us from marking the interval as continuous.
// Valid when _state == reading_from_underlying.
bool _population_range_starts_before_all_rows;
// Points to the underlying reader conforming to _schema,
// either to *_underlying_holder or _read_context.underlying().underlying().
mutation_reader* _underlying = nullptr;
mutation_reader_opt _underlying_holder;
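// Read timestamp and tombstone GC threshold captured at construction time;
// used by copy_from_cache_to_buffer() to opportunistically expire dead rows
// and tombstones while reading from cache.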
gc_clock::time_point _read_time;
gc_clock::time_point _gc_before;
future<> do_fill_buffer();
future<> ensure_underlying();
void copy_from_cache_to_buffer();
future<> process_static_row();
void move_to_end();
void move_to_next_range();
void move_to_range(query::clustering_row_ranges::const_iterator);
void move_to_next_entry();
void maybe_drop_last_entry(tombstone) noexcept;
void add_to_buffer(const partition_snapshot_row_cursor&);
void add_clustering_row_to_buffer(mutation_fragment_v2&&);
void add_to_buffer(range_tombstone_change&&);
void offer_from_underlying(mutation_fragment_v2&&);
future<> read_from_underlying();
void start_reading_from_underlying();
bool after_current_range(position_in_partition_view position);
bool can_populate() const;
// Marks the range between _last_row (exclusive) and _next_row (exclusive) as continuous,
// provided that the underlying reader still matches the latest version of the partition.
// Invalidates _last_row.
void maybe_update_continuity();
// Tries to ensure that the lower bound of the current population range exists.
// Returns false if it failed and range cannot be populated.
// Assumes can_populate().
// If returns true then _last_row is refreshed and points to the population lower bound.
// if _read_context.is_reversed() then _last_row is always valid after this.
// if !_read_context.is_reversed() then _last_row is valid after this or the population lower bound
// is before all rows (so _last_row doesn't point at any entry).
bool ensure_population_lower_bound();
void maybe_add_to_cache(const mutation_fragment_v2& mf);
void maybe_add_to_cache(const clustering_row& cr);
bool maybe_add_to_cache(const range_tombstone_change& rtc);
void maybe_add_to_cache(const static_row& sr);
void maybe_set_static_row_continuous();
void set_rows_entry_continuous(rows_entry& e);
void restore_continuity_after_insertion(const mutation_partition::rows_type::iterator&);
void finish_reader() {
push_mutation_fragment(*_schema, _permit, partition_end());
_end_of_stream = true;
_state = state::end_of_stream;
}
const schema_ptr& snp_schema() const {
return _snp->schema();
}
void touch_partition();
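// Position/range translation helpers between the two clustering orders:
// the "query domain" is the clustering order as seen by the query (reversed for reverse reads),
// while the "table domain" is the schema's native order. For forward reads both helpers are
// identity functions; for reverse reads they mirror the position between the two orders.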
position_in_partition_view to_table_domain(position_in_partition_view query_domain_pos) {
if (!_read_context.is_reversed()) [[likely]] {
return query_domain_pos;
}
return query_domain_pos.reversed();
}
range_tombstone to_table_domain(range_tombstone query_domain_rt) {
if (_read_context.is_reversed()) [[unlikely]] {
query_domain_rt.reverse();
}
return query_domain_rt;
}
position_in_partition_view to_query_domain(position_in_partition_view table_domain_pos) {
if (!_read_context.is_reversed()) [[likely]] {
return table_domain_pos;
}
return table_domain_pos.reversed();
}
const schema& table_schema() {
return *_snp->schema();
}
gc_clock::time_point get_read_time() {
return _read_context.tombstone_gc_state() ? gc_clock::now() : gc_clock::time_point::min();
}
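// Returns the time point before which tombstones for this key may be purged.
// When no tombstone_gc_state is configured (e.g. in some unit tests) this returns
// gc_clock::time_point::min(), which effectively disables expiry-based compaction
// during the read, since nothing compares less than min().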
gc_clock::time_point get_gc_before(const schema& schema, dht::decorated_key dk, const gc_clock::time_point query_time) {
auto gc_state = _read_context.tombstone_gc_state();
if (gc_state) {
return gc_state->get_gc_before_for_key(schema.shared_from_this(), dk, query_time);
}
return gc_clock::time_point::min();
}
public:
cache_mutation_reader(schema_ptr s,
dht::decorated_key dk,
query::clustering_key_filter_ranges&& crr,
read_context& ctx,
partition_snapshot_ptr snp,
row_cache& cache)
: mutation_reader::impl(std::move(s), ctx.permit())
, _snp(std::move(snp))
, _ck_ranges(std::move(crr))
, _ck_ranges_curr(_ck_ranges.begin())
, _ck_ranges_end(_ck_ranges.end())
, _lsa_manager(cache)
, _lower_bound(position_in_partition::before_all_clustered_rows())
, _upper_bound(position_in_partition_view::before_all_clustered_rows())
, _read_context_holder()
, _read_context(ctx) // ctx is owned by the caller, who's responsible for closing it.
, _next_row(*_schema, *_snp, false, _read_context.is_reversed())
, _read_time(get_read_time())
, _gc_before(get_gc_before(*_schema, dk, _read_time))
{
clogger.trace("csm {}: table={}.{}, reversed={}, snap={}", fmt::ptr(this), _schema->ks_name(), _schema->cf_name(), _read_context.is_reversed(),
fmt::ptr(&*_snp));
push_mutation_fragment(*_schema, _permit, partition_start(std::move(dk), _snp->partition_tombstone()));
}
cache_mutation_reader(schema_ptr s,
dht::decorated_key dk,
query::clustering_key_filter_ranges&& crr,
std::unique_ptr<read_context> unique_ctx,
partition_snapshot_ptr snp,
row_cache& cache)
: cache_mutation_reader(s, std::move(dk), std::move(crr), *unique_ctx, std::move(snp), cache)
{
// Assume ownership of the read_context.
// It is our responsibility to close it now.
_read_context_holder = std::move(unique_ctx);
}
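// Note: with this overload, close() below also closes the owned read_context;
// with the read_context& overload above, closing the context remains the caller's responsibility.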
cache_mutation_reader(const cache_mutation_reader&) = delete;
cache_mutation_reader(cache_mutation_reader&&) = delete;
virtual future<> fill_buffer() override;
virtual future<> next_partition() override {
clear_buffer_to_next_partition();
if (is_buffer_empty()) {
_end_of_stream = true;
}
return make_ready_future<>();
}
virtual future<> fast_forward_to(const dht::partition_range&) override {
clear_buffer();
_end_of_stream = true;
return make_ready_future<>();
}
virtual future<> fast_forward_to(position_range pr) override {
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
}
virtual future<> close() noexcept override {
auto close_read_context = _read_context_holder ? _read_context_holder->close() : make_ready_future<>();
auto close_underlying = _underlying_holder ? _underlying_holder->close() : make_ready_future<>();
return when_all_succeed(std::move(close_read_context), std::move(close_underlying)).discard_result();
}
};
inline
future<> cache_mutation_reader::process_static_row() {
if (_snp->static_row_continuous()) {
_read_context.cache().on_row_hit();
static_row sr = _lsa_manager.run_in_read_section([this] {
return _snp->static_row(_read_context.digest_requested());
});
if (!sr.empty()) {
push_mutation_fragment(*_schema, _permit, std::move(sr));
}
return make_ready_future<>();
} else {
_read_context.cache().on_row_miss();
return ensure_underlying().then([this] {
return (*_underlying)().then([this] (mutation_fragment_v2_opt&& sr) {
if (sr) {
SCYLLA_ASSERT(sr->is_static_row());
maybe_add_to_cache(sr->as_static_row());
push_mutation_fragment(std::move(*sr));
}
maybe_set_static_row_continuous();
});
});
}
}
inline
void cache_mutation_reader::touch_partition() {
_snp->touch();
}
inline
future<> cache_mutation_reader::fill_buffer() {
if (_state == state::before_static_row) {
touch_partition();
auto after_static_row = [this] {
if (_ck_ranges_curr == _ck_ranges_end) {
finish_reader();
return make_ready_future<>();
}
_state = state::reading_from_cache;
_lsa_manager.run_in_read_section([this] {
move_to_range(_ck_ranges_curr);
});
return fill_buffer();
};
if (_schema->has_static_columns()) {
return process_static_row().then(std::move(after_static_row));
} else {
return after_static_row();
}
}
clogger.trace("csm {}: fill_buffer(), range={}, lb={}", fmt::ptr(this), *_ck_ranges_curr, _lower_bound);
return do_until([this] { return _end_of_stream || is_buffer_full(); }, [this] {
return do_fill_buffer();
});
}
inline
future<> cache_mutation_reader::ensure_underlying() {
if (_underlying) {
return make_ready_future<>();
}
return _read_context.ensure_underlying().then([this] {
mutation_reader& ctx_underlying = _read_context.underlying().underlying();
if (ctx_underlying.schema() != _schema) {
_underlying_holder = make_delegating_reader(ctx_underlying);
_underlying_holder->upgrade_schema(_schema);
_underlying = &*_underlying_holder;
} else {
_underlying = &ctx_underlying;
}
});
}
inline
future<> cache_mutation_reader::do_fill_buffer() {
if (_state == state::move_to_underlying) {
if (!_underlying) {
return ensure_underlying().then([this] {
return do_fill_buffer();
});
}
_state = state::reading_from_underlying;
_population_range_starts_before_all_rows = _lower_bound.is_before_all_clustered_rows(*_schema) && !_read_context.is_reversed() && !_last_row;
_underlying_upper_bound = _next_row_in_range ? position_in_partition::before_key(_next_row.position())
: position_in_partition(_upper_bound);
if (!_read_context.partition_exists()) {
clogger.trace("csm {}: partition does not exist", fmt::ptr(this));
if (_current_tombstone) {
clogger.trace("csm {}: move_to_underlying: emit rtc({}, null)", fmt::ptr(this), _lower_bound);
push_mutation_fragment(mutation_fragment_v2(*_schema, _permit, range_tombstone_change(_lower_bound, {})));
_current_tombstone = {};
}
return read_from_underlying();
}
return _underlying->fast_forward_to(position_range{_lower_bound, *_underlying_upper_bound}).then([this] {
if (!_current_tombstone) {
return read_from_underlying();
}
return _underlying->peek().then([this] (mutation_fragment_v2* mf) {
position_in_partition::equal_compare eq(*_schema);
if (!mf || !mf->is_range_tombstone_change()
|| !eq(mf->as_range_tombstone_change().position(), _lower_bound)) {
clogger.trace("csm {}: move_to_underlying: emit rtc({}, null)", fmt::ptr(this), _lower_bound);
push_mutation_fragment(mutation_fragment_v2(*_schema, _permit, range_tombstone_change(_lower_bound, {})));
_current_tombstone = {};
}
return read_from_underlying();
});
});
}
if (_state == state::reading_from_underlying) {
return read_from_underlying();
}
// SCYLLA_ASSERT(_state == state::reading_from_cache)
return _lsa_manager.run_in_read_section([this] {
auto next_valid = _next_row.iterators_valid();
clogger.trace("csm {}: reading_from_cache, range=[{}, {}), next={}, valid={}, rt={}", fmt::ptr(this), _lower_bound,
_upper_bound, _next_row.position(), next_valid, _current_tombstone);
// We assume that if there was eviction (and thus the range may
// no longer be continuous), the cursor was invalidated.
if (!next_valid) {
auto adjacent = _next_row.advance_to(_lower_bound);
_next_row_in_range = !after_current_range(_next_row.position());
if (!adjacent && !_next_row.continuous()) {
_last_row = nullptr; // We could insert a dummy here, but this path is unlikely.
start_reading_from_underlying();
return make_ready_future<>();
}
}
_next_row.maybe_refresh();
clogger.trace("csm {}: next={}", fmt::ptr(this), _next_row);
while (_state == state::reading_from_cache) {
copy_from_cache_to_buffer();
if (need_preempt() || is_buffer_full()) {
break;
}
}
return make_ready_future<>();
});
}
inline
future<> cache_mutation_reader::read_from_underlying() {
return consume_mutation_fragments_until(*_underlying,
[this] { return _state != state::reading_from_underlying || is_buffer_full(); },
[this] (mutation_fragment_v2 mf) {
_read_context.cache().on_row_miss();
offer_from_underlying(std::move(mf));
},
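// Invoked when the underlying reader reaches the end of the forwarded range
// (*_underlying_upper_bound): switch back to reading from cache and record what was
// learned from underlying, either by marking the just-read interval as continuous
// (maybe_update_continuity()) or by inserting a dummy entry at the interval's upper
// bound so continuity and the current range tombstone can be attached to it.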
[this] {
_lower_bound = std::move(*_underlying_upper_bound);
_underlying_upper_bound.reset();
_state = state::reading_from_cache;
_lsa_manager.run_in_update_section([this] {
auto same_pos = _next_row.maybe_refresh();
clogger.trace("csm {}: underlying done, in_range={}, same={}, next={}", fmt::ptr(this), _next_row_in_range, same_pos, _next_row);
if (!same_pos) {
_read_context.cache().on_mispopulate(); // FIXME: Insert dummy entry at _lower_bound.
_next_row_in_range = !after_current_range(_next_row.position());
if (!_next_row.continuous()) {
_last_row = nullptr; // We did not populate the full range up to _lower_bound, break continuity
start_reading_from_underlying();
}
return;
}
if (_next_row_in_range) {
maybe_update_continuity();
} else {
if (can_populate()) {
const schema& table_s = table_schema();
rows_entry::tri_compare cmp(table_s);
auto& rows = _snp->version()->partition().mutable_clustered_rows();
if (query::is_single_row(*_schema, *_ck_ranges_curr)) {
// If there are range tombstones which apply to the row then
// we cannot insert an empty entry here, because if those range
// tombstones were evicted by now, we would insert an entry
// with missing range tombstone information.
// FIXME: try to set the range tombstone when possible.
if (!_has_rt) {
with_allocator(_snp->region().allocator(), [&] {
auto e = alloc_strategy_unique_ptr<rows_entry>(
current_allocator().construct<rows_entry>(_ck_ranges_curr->start()->value()));
// Use _next_row iterator only as a hint, because there could be insertions after _upper_bound.
auto insert_result = rows.insert_before_hint(
_next_row.at_a_row() ? _next_row.get_iterator_in_latest_version() : rows.begin(),
std::move(e),
cmp);
if (insert_result.second) {
auto it = insert_result.first;
_snp->tracker()->insert(*it);
auto next = std::next(it);
// Also works in reverse read mode.
// It preserves the continuity of the range the entry falls into.
it->set_continuous(next->continuous());
clogger.trace("csm {}: inserted empty row at {}, cont={}, rt={}", fmt::ptr(this), it->position(), it->continuous(), it->range_tombstone());
}
});
}
} else if (ensure_population_lower_bound()) {
with_allocator(_snp->region().allocator(), [&] {
auto e = alloc_strategy_unique_ptr<rows_entry>(
current_allocator().construct<rows_entry>(table_s, to_table_domain(_upper_bound), is_dummy::yes, is_continuous::no));
// Use _next_row iterator only as a hint, because there could be insertions after _upper_bound.
auto insert_result = rows.insert_before_hint(
_next_row.at_a_row() ? _next_row.get_iterator_in_latest_version() : rows.begin(),
std::move(e),
cmp);
if (insert_result.second) {
clogger.trace("csm {}: L{}: inserted dummy at {}", fmt::ptr(this), __LINE__, _upper_bound);
_snp->tracker()->insert(*insert_result.first);
restore_continuity_after_insertion(insert_result.first);
}
if (_read_context.is_reversed()) [[unlikely]] {
clogger.trace("csm {}: set_continuous({}), prev={}, rt={}", fmt::ptr(this), _last_row.position(), insert_result.first->position(), _current_tombstone);
set_rows_entry_continuous(*_last_row);
_last_row->set_range_tombstone(_current_tombstone);
} else {
clogger.trace("csm {}: set_continuous({}), prev={}, rt={}", fmt::ptr(this), insert_result.first->position(), _last_row.position(), _current_tombstone);
set_rows_entry_continuous(*insert_result.first);
insert_result.first->set_range_tombstone(_current_tombstone);
}
maybe_drop_last_entry(_current_tombstone);
});
}
} else {
_read_context.cache().on_mispopulate();
}
try {
move_to_next_range();
} catch (const std::bad_alloc&) {
// We cannot reenter the section, since we may have moved to the new range
_snp->region().allocator().invalidate_references(); // Invalidates _next_row
}
}
});
return make_ready_future<>();
});
}
inline
bool cache_mutation_reader::ensure_population_lower_bound() {
if (_population_range_starts_before_all_rows) {
return true;
}
if (!_last_row.refresh(*_snp)) {
return false;
}
// The continuity flag we will later set for the upper bound extends to the previous row in the same version,
// so we need to ensure there is an entry in the latest version.
if (!_last_row.is_in_latest_version()) {
rows_entry::tri_compare cmp(*_schema);
partition_snapshot_row_cursor cur(*_schema, *_snp, false, _read_context.is_reversed());
if (!cur.advance_to(to_query_domain(_last_row.position()))) {
return false;
}
if (cmp(cur.table_position(), _last_row.position()) != 0) {
return false;
}
auto res = with_allocator(_snp->region().allocator(), [&] {
return cur.ensure_entry_in_latest();
});
_last_row.set_latest(res.it);
if (res.inserted) {
clogger.trace("csm {}: inserted lower bound dummy at {}", fmt::ptr(this), _last_row.position());
}
}
return true;
}
inline
void cache_mutation_reader::maybe_update_continuity() {
position_in_partition::equal_compare eq(*_schema);
if (can_populate()
&& ensure_population_lower_bound()
&& !eq(_last_row.position(), _next_row.table_position())) {
with_allocator(_snp->region().allocator(), [&] {
rows_entry& e = _next_row.ensure_entry_in_latest().row;
auto& rows = _snp->version()->partition().mutable_clustered_rows();
const schema& table_s = table_schema();
rows_entry::tri_compare table_cmp(table_s);
if (_read_context.is_reversed()) [[unlikely]] {
if (_current_tombstone != _last_row->range_tombstone() && !_last_row->dummy()) {
with_allocator(_snp->region().allocator(), [&] {
auto e2 = alloc_strategy_unique_ptr<rows_entry>(
current_allocator().construct<rows_entry>(table_s,
position_in_partition_view::before_key(_last_row->position()),
is_dummy::yes,
is_continuous::yes));
auto insert_result = rows.insert(std::move(e2), table_cmp);
if (insert_result.second) {
clogger.trace("csm {}: L{}: inserted dummy at {}", fmt::ptr(this), __LINE__, insert_result.first->position());
_snp->tracker()->insert(*insert_result.first);
}
clogger.trace("csm {}: set_continuous({}), prev={}, rt={}", fmt::ptr(this), insert_result.first->position(),
_last_row.position(), _current_tombstone);
set_rows_entry_continuous(*insert_result.first);
insert_result.first->set_range_tombstone(_current_tombstone);
clogger.trace("csm {}: set_continuous({})", fmt::ptr(this), _last_row.position());
set_rows_entry_continuous(*_last_row);
});
} else {
clogger.trace("csm {}: set_continuous({}), rt={}", fmt::ptr(this), _last_row.position(), _current_tombstone);
set_rows_entry_continuous(*_last_row);
_last_row->set_range_tombstone(_current_tombstone);
}
} else {
if (_current_tombstone != e.range_tombstone() && !e.dummy()) {
with_allocator(_snp->region().allocator(), [&] {
auto e2 = alloc_strategy_unique_ptr<rows_entry>(
current_allocator().construct<rows_entry>(table_s,
position_in_partition_view::before_key(e.position()),
is_dummy::yes,
is_continuous::yes));
// Use the _next_row iterator only as a hint because there could be insertions before
// _next_row.get_iterator_in_latest_version(), either from concurrent reads
// or from _next_row.ensure_entry_in_latest().
auto insert_result = rows.insert_before_hint(_next_row.get_iterator_in_latest_version(), std::move(e2), table_cmp);
if (insert_result.second) {
clogger.trace("csm {}: L{}: inserted dummy at {}", fmt::ptr(this), __LINE__, insert_result.first->position());
_snp->tracker()->insert(*insert_result.first);
clogger.trace("csm {}: set_continuous({}), prev={}, rt={}", fmt::ptr(this), insert_result.first->position(),
_last_row.position(), _current_tombstone);
set_rows_entry_continuous(*insert_result.first);
insert_result.first->set_range_tombstone(_current_tombstone);
}
clogger.trace("csm {}: set_continuous({})", fmt::ptr(this), e.position());
set_rows_entry_continuous(e);
});
} else {
clogger.trace("csm {}: set_continuous({}), rt={}", fmt::ptr(this), e.position(), _current_tombstone);
e.set_range_tombstone(_current_tombstone);
set_rows_entry_continuous(e);
}
}
maybe_drop_last_entry(_current_tombstone);
});
} else {
_read_context.cache().on_mispopulate();
}
}
inline
void cache_mutation_reader::maybe_add_to_cache(const clustering_row& cr) {
if (!can_populate()) {
_last_row = nullptr;
_population_range_starts_before_all_rows = false;
_read_context.cache().on_mispopulate();
return;
}
clogger.trace("csm {}: populate({}), rt={}", fmt::ptr(this), clustering_row::printer(*_schema, cr), _current_tombstone);
_lsa_manager.run_in_update_section_with_allocator([this, &cr] {
mutation_partition_v2& mp = _snp->version()->partition();
rows_entry::tri_compare cmp(table_schema());
if (_read_context.digest_requested()) {
cr.cells().prepare_hash(*_schema, column_kind::regular_column);
}
auto new_entry = alloc_strategy_unique_ptr<rows_entry>(
current_allocator().construct<rows_entry>(table_schema(), cr.key(), cr.as_deletable_row()));
new_entry->set_continuous(false);
new_entry->set_range_tombstone(_current_tombstone);
auto it = _next_row.iterators_valid() && _next_row.at_a_row() ? _next_row.get_iterator_in_latest_version()
: mp.clustered_rows().lower_bound(cr.key(), cmp);
auto insert_result = mp.mutable_clustered_rows().insert_before_hint(it, std::move(new_entry), cmp);
it = insert_result.first;
if (insert_result.second) {
_snp->tracker()->insert(*it);
restore_continuity_after_insertion(it);
}
rows_entry& e = *it;
if (ensure_population_lower_bound()) {
if (_read_context.is_reversed()) [[unlikely]] {
clogger.trace("csm {}: set_continuous({})", fmt::ptr(this), _last_row.position());
set_rows_entry_continuous(*_last_row);
// _current_tombstone must also apply to _last_row itself (if it's non-dummy)
// because otherwise there would be a rtc after it, either creating a different entry,
// or clearing _last_row if population did not happen.
_last_row->set_range_tombstone(_current_tombstone);
} else {
clogger.trace("csm {}: set_continuous({})", fmt::ptr(this), e.position());
set_rows_entry_continuous(e);
e.set_range_tombstone(_current_tombstone);
}
} else {
_read_context.cache().on_mispopulate();
}
with_allocator(standard_allocator(), [&] {
_last_row = partition_snapshot_row_weakref(*_snp, it, true);
});
_population_range_starts_before_all_rows = false;
});
}
inline
bool cache_mutation_reader::maybe_add_to_cache(const range_tombstone_change& rtc) {
rows_entry::tri_compare q_cmp(*_schema);
clogger.trace("csm {}: maybe_add_to_cache({})", fmt::ptr(this), rtc);
// Don't emit the closing range tombstone change; we may continue from cache with the same tombstone.
// The following relies on !_underlying_upper_bound->is_clustering_row().
if (q_cmp(rtc.position(), *_underlying_upper_bound) == 0) {
_lower_bound = rtc.position();
return false;
}
auto prev = std::exchange(_current_tombstone, rtc.tombstone());
if (_current_tombstone == prev) {
return false;
}
if (!can_populate()) {
// _current_tombstone is now invalid and remains so for this reader. No need to change it.
_last_row = nullptr;
_population_range_starts_before_all_rows = false;
_read_context.cache().on_mispopulate();
return true;
}
_lsa_manager.run_in_update_section_with_allocator([&] {
mutation_partition_v2& mp = _snp->version()->partition();
rows_entry::tri_compare cmp(table_schema());
auto new_entry = alloc_strategy_unique_ptr<rows_entry>(
current_allocator().construct<rows_entry>(table_schema(), to_table_domain(rtc.position()), is_dummy::yes, is_continuous::no));
auto it = _next_row.iterators_valid() && _next_row.at_a_row() ? _next_row.get_iterator_in_latest_version()
: mp.clustered_rows().lower_bound(to_table_domain(rtc.position()), cmp);
auto insert_result = mp.mutable_clustered_rows().insert_before_hint(it, std::move(new_entry), cmp);
it = insert_result.first;
if (insert_result.second) {
_snp->tracker()->insert(*it);
restore_continuity_after_insertion(it);
}
rows_entry& e = *it;
if (ensure_population_lower_bound()) {
// The underlying reader may emit range_tombstone_change fragments with the same position.
// In such a case, the range to which the tombstone from the first fragment applies is empty and should be ignored.
//
// Note: we are using a query schema comparator to compare table schema positions here,
// but this is okay because we are only checking for equality,
// which is preserved by schema reversals.
if (q_cmp(_last_row.position(), it->position()) != 0) {
if (_read_context.is_reversed()) [[unlikely]] {
clogger.trace("csm {}: set_continuous({}), rt={}", fmt::ptr(this), _last_row.position(), prev);
set_rows_entry_continuous(*_last_row);
_last_row->set_range_tombstone(prev);
} else {
clogger.trace("csm {}: set_continuous({}), rt={}", fmt::ptr(this), e.position(), prev);
set_rows_entry_continuous(e);
e.set_range_tombstone(prev);
}
}
} else {
_read_context.cache().on_mispopulate();
}
with_allocator(standard_allocator(), [&] {
_last_row = partition_snapshot_row_weakref(*_snp, it, true);
});
_population_range_starts_before_all_rows = false;
});
return true;
}
inline
bool cache_mutation_reader::after_current_range(position_in_partition_view p) {
position_in_partition::tri_compare cmp(*_schema);
return cmp(p, _upper_bound) >= 0;
}
inline
void cache_mutation_reader::start_reading_from_underlying() {
clogger.trace("csm {}: start_reading_from_underlying(), range=[{}, {})", fmt::ptr(this), _lower_bound, _next_row_in_range ? _next_row.position() : _upper_bound);
_state = state::move_to_underlying;
_next_row.touch();
}
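// Emits the next piece of the stream from the cache snapshot: first a range_tombstone_change
// if the active tombstone changes at the current position, then the row pointed to by _next_row
// (possibly compacting it first), advancing _lower_bound as it goes. Moves to the next clustering
// range when the current one is exhausted; switching to the underlying reader on a discontinuity
// happens in move_to_next_entry().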
inline
void cache_mutation_reader::copy_from_cache_to_buffer() {
clogger.trace("csm {}: copy_from_cache, next_row_in_range={}, next={}", fmt::ptr(this), _next_row_in_range, _next_row);
_next_row.touch();
if (_next_row.range_tombstone() != _current_tombstone) {
position_in_partition::equal_compare eq(*_schema);
auto upper_bound = _next_row_in_range ? position_in_partition_view::before_key(_next_row.position()) : _upper_bound;
if (!eq(_lower_bound, upper_bound)) {
position_in_partition new_lower_bound(upper_bound);
auto tomb = _next_row.range_tombstone();
clogger.trace("csm {}: rtc({}, {}) ...{}", fmt::ptr(this), _lower_bound, tomb, new_lower_bound);
push_mutation_fragment(mutation_fragment_v2(*_schema, _permit, range_tombstone_change(_lower_bound, tomb)));
_current_tombstone = tomb;
_lower_bound = std::move(new_lower_bound);
_read_context.cache()._tracker.on_range_tombstone_read();
}
}
if (_next_row_in_range) {
bool remove_row = false;
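// Opportunistic compaction on read: if this snapshot is the only version (latest and oldest)
// and the row's tombstone or marker has expired relative to _gc_before, rewrite the cached row
// with compact_and_expire() and, if nothing remains, drop the entry from cache below.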
if (_read_context.tombstone_gc_state() // do not compact rows when tombstone_gc_state is not set (used in some unit tests)
&& !_next_row.dummy()
&& _snp->at_latest_version()
&& _snp->at_oldest_version()) {
deletable_row& row = _next_row.latest_row();
tombstone range_tomb = _next_row.range_tombstone_for_row();
auto t = row.deleted_at();
t.apply(range_tomb);
auto row_tomb_expired = [&](row_tombstone tomb) {
return (tomb && tomb.max_deletion_time() < _gc_before);
};
auto is_row_dead = [&](const deletable_row& row) {
auto& m = row.marker();
return (!m.is_missing() && m.is_dead(_read_time) && m.deletion_time() < _gc_before);
};
if (row_tomb_expired(t) || is_row_dead(row)) {
const schema& row_schema = _next_row.latest_row_schema();
_read_context.cache()._tracker.on_row_compacted();
with_allocator(_snp->region().allocator(), [&] {
deletable_row row_copy(row_schema, row);
row_copy.compact_and_expire(row_schema, t.tomb(), _read_time, always_gc, _gc_before, nullptr);
std::swap(row, row_copy);
});
remove_row = row.empty();
auto tomb_expired = [&](tombstone tomb) {
return (tomb && tomb.deletion_time < _gc_before);
};
auto latest_range_tomb = _next_row.get_iterator_in_latest_version()->range_tombstone();
if (tomb_expired(latest_range_tomb)) {
_next_row.get_iterator_in_latest_version()->set_range_tombstone({});
}
}
}
if (_next_row.range_tombstone_for_row() != _current_tombstone) [[unlikely]] {
auto tomb = _next_row.range_tombstone_for_row();
auto new_lower_bound = position_in_partition::before_key(_next_row.position());
clogger.trace("csm {}: rtc({}, {})", fmt::ptr(this), new_lower_bound, tomb);
push_mutation_fragment(mutation_fragment_v2(*_schema, _permit, range_tombstone_change(new_lower_bound, tomb)));
_lower_bound = std::move(new_lower_bound);
_current_tombstone = tomb;
_read_context.cache()._tracker.on_range_tombstone_read();
}
if (remove_row) {
_read_context.cache()._tracker.on_row_compacted_away();
_lower_bound = position_in_partition::after_key(*_schema, _next_row.position());
partition_snapshot_row_weakref row_ref(_next_row);
move_to_next_entry();
with_allocator(_snp->region().allocator(), [&] {
cache_tracker& tracker = _read_context.cache()._tracker;
if (row_ref->is_linked()) {
tracker.get_lru().remove(*row_ref);
}
row_ref->on_evicted(tracker);
});
_snp->region().allocator().invalidate_references();
_next_row.force_valid();
} else {
// We add the row to the buffer even when it's full.
// This simplifies the code. For more info see #3139.
add_to_buffer(_next_row);
move_to_next_entry();
}
} else {
move_to_next_range();
}
}
inline
void cache_mutation_reader::move_to_end() {
finish_reader();
clogger.trace("csm {}: eos", fmt::ptr(this));
}
inline
void cache_mutation_reader::move_to_next_range() {
if (_current_tombstone) {
clogger.trace("csm {}: move_to_next_range: emit rtc({}, null)", fmt::ptr(this), _upper_bound);
push_mutation_fragment(mutation_fragment_v2(*_schema, _permit, range_tombstone_change(_upper_bound, {})));
_current_tombstone = {};
}
auto next_it = std::next(_ck_ranges_curr);
if (next_it == _ck_ranges_end) {
move_to_end();
_ck_ranges_curr = next_it;
} else {
move_to_range(next_it);
}
}
inline
void cache_mutation_reader::move_to_range(query::clustering_row_ranges::const_iterator next_it) {
auto lb = position_in_partition::for_range_start(*next_it);
auto ub = position_in_partition_view::for_range_end(*next_it);
_last_row = nullptr;
_lower_bound = std::move(lb);
_upper_bound = std::move(ub);
_ck_ranges_curr = next_it;
auto adjacent = _next_row.advance_to(_lower_bound);
_next_row_in_range = !after_current_range(_next_row.position());
clogger.trace("csm {}: move_to_range(), range={}, lb={}, ub={}, next={}", fmt::ptr(this), *_ck_ranges_curr, _lower_bound, _upper_bound, _next_row.position());
if (!adjacent && !_next_row.continuous()) {
// FIXME: We don't insert a dummy for a singular range to avoid allocating 3 entries
// for a hit (before, at and after). If we supported the concept of an incomplete row,
// we could instead insert such a row for the lower bound if it's full, for both singular and
// non-singular ranges.
if (_ck_ranges_curr->start() && !query::is_single_row(*_schema, *_ck_ranges_curr)) {
// Insert dummy for lower bound
if (can_populate()) {
// FIXME: _lower_bound could be adjacent to the previous row, in which case we could skip this
clogger.trace("csm {}: insert dummy at {}", fmt::ptr(this), _lower_bound);
auto insert_result = with_allocator(_lsa_manager.region().allocator(), [&] {
rows_entry::tri_compare cmp(table_schema());
auto& rows = _snp->version()->partition().mutable_clustered_rows();
auto new_entry = alloc_strategy_unique_ptr<rows_entry>(current_allocator().construct<rows_entry>(table_schema(),
to_table_domain(_lower_bound), is_dummy::yes, is_continuous::no));
return rows.insert_before_hint(
_next_row.at_a_row() ? _next_row.get_iterator_in_latest_version() : rows.begin(),
std::move(new_entry),
cmp);
});
auto it = insert_result.first;
if (insert_result.second) {
_snp->tracker()->insert(*it);
}
_last_row = partition_snapshot_row_weakref(*_snp, it, true);
} else {
_read_context.cache().on_mispopulate();
}
}
start_reading_from_underlying();
}
}
// Drops _last_row entry when possible without changing logical contents of the partition.
// Call only when _last_row and _next_row are valid.
// Calling after ensure_population_lower_bound() is ok.
// _next_row must have a greater position than _last_row.
// Invalidates references but keeps the _next_row valid.
inline
void cache_mutation_reader::maybe_drop_last_entry(tombstone rt) noexcept {
// Drop dummy entry if it falls inside a continuous range.
// This prevents unnecessary dummy entries from accumulating in cache and slowing down scans.
//
// Eviction can happen only from oldest versions to preserve the continuity non-overlapping rule
// (See docs/dev/row_cache.md)
//
if (_last_row
&& !_read_context.is_reversed() // FIXME
&& _last_row->dummy()
&& _last_row->continuous()
&& _last_row->range_tombstone() == rt
&& _snp->at_latest_version()
&& _snp->at_oldest_version()) {
clogger.trace("csm {}: dropping unnecessary dummy at {}", fmt::ptr(this), _last_row->position());
with_allocator(_snp->region().allocator(), [&] {
cache_tracker& tracker = _read_context.cache()._tracker;
tracker.get_lru().remove(*_last_row);
_last_row->on_evicted(tracker);
});
_last_row = nullptr;
// There could be iterators pointing to _last_row, invalidate them
_snp->region().allocator().invalidate_references();
// Don't invalidate _next_row, move_to_next_entry() expects it to be still valid.
_next_row.force_valid();
}
}
// _next_row must be inside the range.
inline
void cache_mutation_reader::move_to_next_entry() {
clogger.trace("csm {}: move_to_next_entry(), curr={}", fmt::ptr(this), _next_row.position());
if (no_clustering_row_between(*_schema, _next_row.position(), _upper_bound)) {
move_to_next_range();
} else {
auto new_last_row = partition_snapshot_row_weakref(_next_row);
// In reverse mode, the cursor may fall out of the entries because there is no dummy before all rows.
// Hence !next() doesn't mean we can end the read. The cursor will be positioned before all rows and
// not point at any row. continuous() is still correctly set.
_next_row.next();
_last_row = std::move(new_last_row);
_next_row_in_range = !after_current_range(_next_row.position());
clogger.trace("csm {}: next={}, cont={}, in_range={}", fmt::ptr(this), _next_row.position(), _next_row.continuous(), _next_row_in_range);
if (!_next_row.continuous()) {
start_reading_from_underlying();
} else {
maybe_drop_last_entry(_next_row.range_tombstone());
}
}
}
inline
void cache_mutation_reader::offer_from_underlying(mutation_fragment_v2&& mf) {
clogger.trace("csm {}: offer_from_underlying({})", fmt::ptr(this), mutation_fragment_v2::printer(*_schema, mf));
if (mf.is_clustering_row()) {
maybe_add_to_cache(mf.as_clustering_row());
add_clustering_row_to_buffer(std::move(mf));
} else {
SCYLLA_ASSERT(mf.is_range_tombstone_change());
auto& chg = mf.as_range_tombstone_change();
if (maybe_add_to_cache(chg)) {
add_to_buffer(std::move(mf).as_range_tombstone_change());
}
}
}