MySQL 9.1.0
Source Code Documentation
btr0mtib.h
Go to the documentation of this file.
1/*****************************************************************************
2
3Copyright (c) 2023, 2024, Oracle and/or its affiliates.
4
5This program is free software; you can redistribute it and/or modify it under
6the terms of the GNU General Public License, version 2.0, as published by the
7Free Software Foundation.
8
9This program is designed to work with certain software (including
10but not limited to OpenSSL) that is licensed under separate terms,
11as designated in a particular file or component or in included license
12documentation. The authors of MySQL hereby grant you an additional
13permission to link the program and your derivative works with the
14separately licensed software that they have either included with
15the program or referenced in the documentation.
16
17This program is distributed in the hope that it will be useful, but WITHOUT
18ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
19FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
20for more details.
21
22You should have received a copy of the GNU General Public License along with
23this program; if not, write to the Free Software Foundation, Inc.,
2451 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25
26*****************************************************************************/
27
28/** @file include/btr0mtib.h
29
30 Multi Threaded Index Build (MTIB) using BUF_BLOCK_MEMORY and dedicated
31 Bulk_flusher threads.
32
33 Created 09/Feb/2023 Annamalai Gurusami
34 *************************************************************************/
35
36#ifndef btr0mtib_h
37#define btr0mtib_h
38
39#include <stddef.h>
40#include <vector>
41
42#include "btr0load.h"
43#include "ddl0impl-compare.h"
44#include "dict0dict.h"
45#include "lob0bulk.h"
46#include "lob0lob.h"
47#include "page0cur.h"
48#include "ut0class_life_cycle.h"
49#include "ut0new.h"
50#include "ut0object_cache.h"
51
52/* The Btree_multi namespace is used for multi-threaded parallel index build. */
53namespace Btree_multi {
54
55// Forward declaration.
56class Page_load;
57class Btree_load;
58struct Page_stat;
59
60using Blob_context = void *;
61
62namespace bulk {
63class Blob_inserter;
64} // namespace bulk
65
66/** Allocate, use, manage and flush the pages of one extent (FSP_EXTENT_SIZE). */
68 using Page_range_t = std::pair<page_no_t, page_no_t>;
69
70 /** Constructor.
71 @param[in] btree_load B-tree loader object.
72 @param[in] is_leaf true if this is part of leaf segment, false if this is
73 part of non-leaf (or top) segment. */
74 Page_extent(Btree_load *btree_load, const bool is_leaf);
75
76 /** Destructor. */
78
79 /** Next page number to be used. */
81
 82 /** Page numbers of the pages that have been allocated in this extent.
83 The page range is [p1, p2), where p2 is not included. */
85
86 /** All the page loaders of the used pages. */
87 std::vector<Page_load *> m_page_loads;
88
89 bool is_btree_load_nullptr() const { return m_btree_load == nullptr; }
90
91 public:
92 /** Create an object of type Page_extent in the heap. */
93 static Page_extent *create(Btree_load *btree_load, const bool is_leaf,
94 const bool is_blob);
95
96 /** Release the page extent. Delete if not cached.
97 @param[in] extent extent to release */
98 static void drop(Page_extent *extent);
99
100 /** Number of pages in this extent. */
101 page_no_t page_count() const;
102
103 /** Reset the range with the given value.
104 @param[in] range new range value to be used. */
105 void reset_range(const Page_range_t &range);
106
107 /** Calculate the number of used pages.
 108 @return the number of used pages. */
109 size_t used_pages() const { return m_page_no - m_range.first; }
110
111 void get_page_numbers(std::vector<page_no_t> &page_numbers) const;
112
113 /** Get the index of the first unused page load.
114 @return index of the first unused page load. */
115 size_t last() const { return m_page_no - m_range.first; }
116
117 /** Check if the range is valid.
118 @return true if the range is valid, false otherwise. */
119 bool is_valid() const;
120
121 bool is_null() const {
122 return (m_range.first == FIL_NULL) && (m_range.second == FIL_NULL);
123 }
124
125 public:
126 /** Member of Page_extent. The index of page_load objects in the m_page_loads
127 corresponds to the page_no in the m_range. Here, check if a page_no already
128 has a Page_load object.
129 @param[in] page_no page_no for which we are looking for Page_load obj.
130 @return Page_load object if available, nullptr otherwise. */
132
133 /** Member of Page_extent. Associate the given page_no and the page load
134 object.
135 @param[in] page_no page number to associate.
136 @param[in] page_load page load object to associate. */
137 void set_page_load(page_no_t page_no, Page_load *page_load);
138
140
141 /** Initialize the next page number to be allocated. The page range should
142 have been already initialized. */
143 void init();
144
145 /** Check if no more pages are there to be used.
146 @return true if the page extent is completely used.
147 @return false if the page extent has more pages to be used. */
148 bool is_fully_used() const { return m_page_no == m_range.second; }
149
150 /** Check if there are any pages used.
151 @return true if at least one page is used.
152 @return false if no pages are used in this extent.*/
153 bool is_any_used() const {
154 ut_ad(m_page_no == m_range.first || m_page_loads.size() > 0);
155 return m_page_no > m_range.first;
156 }
157
158 public:
159 /** Allocate a page number. */
161
162 /** Save a page_load. */
163 void append(Page_load *page_load);
164
165 /** Flush the used pages to disk. It also frees the unused pages back to the
166 segment.
167 @param[in,out] node space file node
168 @param[in,out] iov vector IO array
169 @param[in] iov_size vector IO array size
170 @return On success, return DB_SUCCESS. */
171 dberr_t flush(fil_node_t *node, void *iov, size_t iov_size);
172
173 /** Flush one page at a time. This can be used when scatter/gather i/o is
174 not available for use.
175 @param[in,out] node space file node
176 @return On success, return DB_SUCCESS. */
178
179 /** Flush 1 extent pages at a time. Internally it will call OS dependent
180 API (either bulk_flush_win() on Windows or bulk_flush_linux() on other
 181 operating systems).
182 @param[in,out] node space file node
183 @param[in,out] iov vector IO array
184 @param[in] iov_size vector IO array size
185 @return DB_SUCCESS on success, error code on failure. */
186 dberr_t bulk_flush(fil_node_t *node, void *iov [[maybe_unused]],
187 size_t iov_size [[maybe_unused]]);
188
189#ifdef UNIV_LINUX
190 /** Flush 1 extent pages at a time. Uses pwritev() i/o API.
191 @param[in,out] node space file node
192 @param[in,out] iov vector IO array
193 @param[in] iov_size vector IO array size
194 @return DB_SUCCESS on success, error code on failure. */
195 dberr_t bulk_flush_linux(fil_node_t *node, struct iovec *iov,
196 size_t iov_size);
197#endif /* UNIV_LINUX */
198
199 /** Free all resources. */
201
202 /** Free any cached page load entries. */
203 void destroy_cached();
204
205 space_id_t space() const;
206
207 /** Mark the extent as cached. Flush thread should not free this extent. */
208 void set_cached() { m_is_cached.store(true); }
209
210 /** Set and unset free state of a cached extent.
211 @param[in] free state to be set */
212 void set_state(bool free) { m_is_free.store(free); }
213
214 /** @return true iff the cached element is in free state. */
215 bool is_free() const { return m_is_free.load(); }
216
217 /** @return true iff it is a cached extent. */
218 bool is_cached() const { return m_is_cached.load(); }
219
220 /** Reset page load cache to free all. */
222
223 public:
224 std::ostream &print(std::ostream &out) const;
225
226 /** Mark that this extent is used for blobs. */
227 void set_blob() { m_is_blob = true; }
228
229 /** Check if this is a blob extent.
230 @return true if it is a blob extent. */
231 bool is_blob() const { return m_is_blob; }
232
233 /** Free the BUF_BLOCK_MEMORY blocks used by this extent. */
234 void free_memory_blocks();
235
236#ifdef UNIV_DEBUG
237 /** True if this extent has been handed over to the bulk flusher. */
238 std::atomic_bool m_is_owned_by_bulk_flusher{false};
239#endif /* UNIV_DEBUG */
240
241 private:
243
244 /** true if this extent belongs to leaf segment. */
245 bool m_is_leaf{true};
246
 247 /** true iff the extent is cached. */
248 std::atomic_bool m_is_cached{false};
249 /** true if the cached entry is free to be used. */
250 std::atomic_bool m_is_free{true};
251 /** Cached page loads. */
252 std::vector<Page_load *> m_cached_page_loads;
253 /** Next cached page load index. */
255
256 /** True if this extent is used for blobs. */
257 bool m_is_blob{false};
258
259 friend struct Level_ctx;
260};
261
263 std::vector<page_no_t> &page_numbers) const {
264 for (page_no_t i = m_range.first; i < m_page_no; ++i) {
265 page_numbers.push_back(i);
266 }
267}
268
270 Page_load *page_load) {
271 ut_ad(page_no >= m_range.first);
272 ut_ad(page_no < m_range.second);
273 const size_t idx = page_no - m_range.first;
274 if (idx == m_page_loads.size()) {
275 m_page_loads.push_back(page_load);
276 } else {
277 ut_ad(idx <= m_page_loads.size());
278 ut_ad(m_page_loads[idx] == nullptr);
279 m_page_loads[idx] = page_load;
280 }
281 ut_ad(m_page_loads.size() > 0);
282}
283
285 ut_ad(page_no >= m_range.first);
286 ut_ad(page_no < m_range.second);
287 const size_t idx = page_no - m_range.first;
288 if (m_page_loads.empty() || m_page_loads.size() <= idx) {
289 return nullptr;
290 }
291 return m_page_loads[idx];
292}
293
297 m_range.first = FIL_NULL;
298 m_range.second = FIL_NULL;
299 m_btree_load = nullptr;
300}
301
302inline bool Page_extent::is_valid() const {
303 ut_ad(m_range.first != 0);
304 ut_ad(m_range.second != 0);
305 if (is_null()) {
306 return true;
307 }
308 ut_ad(m_range.first < m_range.second);
309 ut_ad((m_range.second - m_range.first) <= FSP_EXTENT_SIZE);
310 return m_range.first < m_range.second;
311}
312
313inline std::ostream &Page_extent::print(std::ostream &out) const {
314 out << "[Page_extent: this=" << (void *)this
315 << ", m_range.first=" << m_range.first
316 << ", m_range.second=" << m_range.second
317 << ", page_loads=" << m_page_loads.size() << "]" << std::endl;
318 return out;
319}
320
321inline std::ostream &operator<<(std::ostream &out, const Page_extent &obj) {
322 return obj.print(out);
323}
324
326 ut_ad(range.first != 0);
327 ut_ad(range.second != 0);
328 ut_ad(range.first != FIL_NULL);
329 ut_ad(range.second != FIL_NULL);
330 m_range = range;
331 m_page_no = m_range.first;
332}
333
335 ut_ad(is_valid());
337
338 if (m_page_no == m_range.second) {
339 return FIL_NULL;
340 }
341 return m_page_no++;
342}
343
344inline void Page_extent::init() {
345 ut_ad(m_range.first != 0);
346 ut_ad(m_range.second != 0);
347 ut_ad(m_range.first != FIL_NULL);
348 ut_ad(m_range.second != FIL_NULL);
349 m_page_no = m_range.first;
350 m_page_loads.reserve(page_count());
351}
352
354 return m_range.second - m_range.first;
355}
356
357/** Context information for each level. */
358struct Level_ctx {
359 /** Static member function construct a Level_ctx object.
360 @param[in] index dictionary index object.
361 @param[in] level the B-tree level of this context object.
362 @param[in] btree_load a back pointer to the Btree_load object to which this
363 Level_ctx object is a part of.
364 @return level context object on success, nullptr on error. */
365 static Level_ctx *create(dict_index_t *index, size_t level,
366 Btree_load *btree_load);
367
368 /** Static member function to destroy a Level_ctx object.
369 @param[in] obj the Level_ctx object to destroy. */
370 static void destroy(Level_ctx *obj);
371
372 /** Constructor
373 @param[in] index dictionary index object.
374 @param[in] level the B-tree level of this context object.
375 @param[in] btree_load a back pointer to the Btree_load object to which this
376 Level_ctx object is a part of.*/
377 Level_ctx(dict_index_t *index, size_t level, Btree_load *btree_load)
378 : m_index(index),
379 m_level(level),
381 m_btree_load(btree_load) {}
382
383 /** Destructor. */
384 ~Level_ctx();
385
386 /** Initialize.
387 @return DB_SUCCESS on success, an error code on failure. */
388 dberr_t init();
389
390 /** Check if this is leaf level.
391 @return true if this is leaf level, false otherwise. */
392 bool is_leaf() const { return m_level == 0; }
393
395
396 /** Free the current page load. */
397 void free_page_load();
398
399 /** Allocate a page number. Subsequently a Page_load will be created with the
400 allocated page number.
401 @param[out] page_no page number that was allocated.
402 @return DB_SUCCESS on success, error code on failure.*/
404
405 /** Allocate one extent in the relevant file segment. No associated buffer
406 blocks are allocated.
407 @return DB_SUCCESS on success, error code on failure.*/
409
410 /** Allocate private memory buffer (BUF_BLOCK_MEMORY) block for given page
411 number. */
412 [[nodiscard]] buf_block_t *alloc(const page_no_t new_page_no) noexcept;
413
414 void set_current_page_load(Page_load *sibling);
415
416 Page_load *get_page_load() const;
417
418 trx_id_t get_trx_id() const;
419
420 /** The current extent that is being loaded. */
422
423 /** Build the extent cache. */
424 void build_extent_cache();
425
426 /** Load one extent from extent cache.
427 @return true iff successful. */
429
 430 /** Build page loader cache for current extent. */
431 void build_page_cache();
432
433 /** Get a free page loader from cache
434 @return page loader or nullptr if not found. */
436
437 /** Pre allocated extents to prevent repeated allocation and free. */
438 std::vector<Page_extent *> m_cached_extents;
439
440 /** The page_no of the first page in this level. */
442
443 /** The page_no of the last page in this level. */
445
446 /** The index which is being built. */
448
449 /** The B-tree level whose context information is stored in this obj. */
450 const size_t m_level{};
451
452 /** The Page_load of the current page being loaded. */
454
455 /** A back pointer to conceptually higher level btree load object. */
457
458 /** Number of pages allocated at this level. */
460
461 /** Number of extents allocated at this level. */
463
464 /** True if the current extent is full. */
465 bool m_extent_full{true};
466
467#ifdef UNIV_DEBUG
468 bool is_page_tracked(const page_no_t &page_no) const;
469 std::vector<page_no_t> m_pages_allocated;
470#endif /* UNIV_DEBUG */
471};
472
474
476 m_page_load = sibling;
477}
478
480 public:
481 enum class Type {
482 /** Allocate by Page */
483 PAGE,
484 /** Allocate by extent. */
485 EXTENT
486 };
487
488 /** Destructor to ensure thread stop. */
490
491 /** Check size and set extent allocator size parameters
492 @param[in] table Innodb dictionary table object
493 @param[in] trx transaction performing bulk load
494 @param[in] size total data size to be loaded
495 @param[in] num_threads number of concurrent threads
496 @param[in] in_pages if true, allocate in pages
497 @return tablespace extend size in bytes. */
498 uint64_t init(dict_table_t *table, trx_t *trx, size_t size,
499 size_t num_threads, bool in_pages);
500
501 /* Start extent allocator thread. */
502 void start();
503
504 /** Stop extent allocator thread, if active. */
505 void stop();
506
 507 /** Allocate a page range - currently an Extent.
508 @param[in] is_leaf true if leaf segment, otherwise non-leaf segment
509 @param[in] alloc_page if true, allocate in pages otherwise allocate extent
510 @param[out] range page range
511 @param[in,out] fn_wait_begin begin callback if wait is needed
512 @param[in,out] fn_wait_end end callback if wait is needed
513 @return Innodb error code. */
514 dberr_t allocate(bool is_leaf, bool alloc_page, Page_range_t &range,
515 std::function<void()> &fn_wait_begin,
516 std::function<void()> &fn_wait_end);
517
518 private:
519 /** Upper bound for max ranges. */
520 static constexpr size_t S_MAX_RANGES = 2 * 1024;
521
522 /** Maximum size by which the tablespace is extended each time. */
523 static constexpr size_t S_BULK_EXTEND_SIZE_MAX = 64;
524
526 /** Initialize cache.
527 @param[in] max_range maximum number of extents to cache. */
528 void init(size_t max_range);
529
530 /** @return true if no available extent to consume. */
531 inline bool is_empty() const { return (m_num_allocated == m_num_consumed); }
532
533 /** @return true if cache is full and no more extents can be added. */
534 inline bool is_full() const {
536 }
537
538 /** Check for number of extents to be allocated and cached.
539 @param[out] num_alloc number of extents to allocate
540 @param[out] num_free number of free extents
 541 @return true if successful. */
542 bool check(size_t &num_alloc, size_t &num_free) const;
543
544 /** Get one page range from the cache.
545 @param[out] range the allocated page range
546 @param[out] alloc_trigger true, if need to trigger allocator
547 @return true if extent is successfully returned from cache. */
548 bool get_range(Page_range_t &range, bool &alloc_trigger);
549
550 /** Set allocated range(extent) in cache.
551 @param[in] index position of the range
552 @param[in] range page range to be set */
553 void set_range(size_t index, Page_range_t &range);
554
555 /** Cached page ranges already allocated to the segment. */
556 std::array<Page_range_t, S_MAX_RANGES> m_ranges;
557
558 /** Maximum number of ranges to pre-allocate. */
560
561 /** Total number of ranges allocated. */
562 std::atomic<size_t> m_num_allocated{0};
563
564 /** Total number of ranges allocated. */
565 std::atomic<size_t> m_num_consumed{0};
566 };
567
568 /** Extent thread executor.
569 @return innodb error code. */
570 dberr_t run();
571
572 /** Allocate extents and fill the cache.
573 @param[in] is_leaf true if leaf segment, otherwise non-leaf segment
574 @param[in] num_extents number of extents to allocate
575 @return innodb error code. */
576 dberr_t allocate_extents(bool is_leaf, size_t num_extents);
577
578 /** Allocator wait function. */
579 void allocator_wait() const;
580
581 /** Check if leaf and non-leaf extent cache needs to be filled.
582 @param[out] n_leaf number of leaf extents to allocate
583 @param[out] n_non_leaf number of non-leaf extents to allocate
584 @param[out] trigger true if consumers should be triggered
585 @return true if allocator should stop. */
586 bool check(size_t &n_leaf, size_t &n_non_leaf, bool &trigger);
587
588 /** Allocate one extent.
589 @param[in] is_leaf true if leaf segment, otherwise non-leaf segment
 590 @param[in,out] mtr mini transaction to be used for allocation
 591 @param[out] range page range for the extent
592 @return innodb error code. */
593 dberr_t allocate_extent(bool is_leaf, mtr_t &mtr, Page_range_t &range);
594
595 /** Allocate one page.
596 @param[in] is_leaf true if leaf segment, otherwise non-leaf segment
 597 @param[out] range page range for the page
598 @return innodb error code. */
599 dberr_t allocate_page(bool is_leaf, Page_range_t &range);
600
601 /** @return true if operation is interrupted. */
602 bool is_interrupted();
603
604 private:
605 /** Bulk extent allocator. */
606 std::thread m_thread;
607
608 /** Number of times consumer(s) had to wait. */
609 mutable size_t m_consumer_wait_count{};
610
611 /** Number of times allocator had to wait. */
612 mutable size_t m_allocator_wait_count{};
613
614 /** Total consumer wait time in micro seconds. */
615 mutable std::chrono::microseconds m_consumer_wait_time;
616
617 /** Total allocator wait time in micro seconds. */
618 mutable std::chrono::microseconds m_allocator_wait_time;
619
620 /** Page range type. */
622
623 /** Cached leaf extents. */
625
626 /** Cached non-leaf extents. */
628
629 /** This mutex protects the m_queue. */
630 mutable std::mutex m_mutex;
631
632 /** Condition variable for allocator thread. */
633 mutable std::condition_variable m_allocator_condition;
634
635 /** Condition variable for extent consumer threads. */
636 mutable std::condition_variable m_consumer_condition;
637
638 /** Flag to indicate if the bulk allocator thread should stop. */
639 bool m_stop{false};
640
641 /** Error code, protected by m_mutex */
643
644 /** Innodb dictionary table object. */
646
647 /** Innodb transaction - used for checking interrupt. */
649
650 /** Number of concurrent consumers. */
652};
653
655 public:
656 /** Thread main function.
657 @return innodb error code. */
658 dberr_t run();
659
660 /** Check if work is available for the bulk flusher thread.
661 @return true if work is available. */
662 bool is_work_available();
663
664 /** Start a new thread to do the flush work.
665 @param[in] space_id space for flushing pages to
666 @param[in] index loader index
667 @param[in] queue_size flusher queue size */
668 void start(space_id_t space_id, size_t index, size_t queue_size);
669
670 /** Add a page extent to the bulk flush queue.
671 @param[in,out] page_extent extent to be added to the queue
672 @param[in,out] fn_wait_begin begin callback if wait is needed
673 @param[in,out] fn_wait_end end callback if wait is needed */
674 void add(Page_extent *page_extent, std::function<void()> &fn_wait_begin,
675 std::function<void()> &fn_wait_end);
676
677 /** Check for flusher error and wake up flusher thread.
678 @return Innodb error code. */
680
681 /** Wait till the bulk flush thread stops. */
682 void wait_to_stop();
683
684 /** Get the maximum allowed queue size.
685 @return the maximum allowed queue size. */
686 size_t get_max_queue_size() const { return m_max_queue_size; }
687
688 /** Destructor. */
690
691 /** @return true iff error has occurred. */
692 bool is_error() const { return m_is_error.load(); }
693
694 /** @return error code */
695 dberr_t get_error() const;
696
697 void add_to_free_queue(Page_extent *page_extent);
698
700
701 private:
702 /** Do the actual work of flushing.
703 @param[in,out] node space file node
704 @param[in,out] iov vector IO array
705 @param[in] iov_size vector IO array size */
706 void do_work(fil_node_t *node, void *iov, size_t iov_size);
707
708 /** Check if the bulk flush thread should stop working. */
709 bool should_i_stop() const { return m_stop.load(); }
710
711 /** When no work is available, put the thread to sleep. */
712 void wait();
713
714 /** Print useful information to the server log file while exiting. */
715 void info();
716
717 /** This queue is protected by the m_mutex. */
718 std::vector<Page_extent *> m_queue;
719
720 /** This mutex protects the m_queue. */
721 mutable std::mutex m_mutex;
722
723 /** Condition variable to wait upon. */
724 mutable std::condition_variable m_condition;
725
726 /** This queue is protected by the m_free_mutex. It is used to cache the
727 Page_extent objects that have been flushed and ready for re-use. */
728 std::vector<Page_extent *> m_free_queue;
729
730 /** This mutex protects the m_free_queue. */
731 mutable std::mutex m_free_mutex;
732
733 /** Flag to indicate if the bulk flusher thread should stop. If true, the
734 bulk flusher thread will stop after emptying the queue. If false, the
735 bulk flusher thread will go to sleep after emptying the queue. */
736 std::atomic<bool> m_stop{false};
737
738 /** Set if error is encountered during flush. */
739 std::atomic<bool> m_is_error{false};
740
741 /** Error code, protected by m_mutex */
743
744 /** Set error code.
745 @param[in] error_code error code to set. It could be DB_SUCCESS.*/
746 void set_error(dberr_t error_code);
747
748 /** Private queue (private to the bulk flush thread) containing the extents to
749 flush. */
750 std::vector<Page_extent *> m_priv_queue;
751
752 /** Bulk flusher thread. */
753 std::thread m_flush_thread;
754
755 /** Number of times slept */
756 size_t m_n_sleep{};
757
758 /** Total sleep time in micro seconds. */
759 std::chrono::microseconds m_wait_time;
760
761 /** The sleep duration in milliseconds. */
762 static constexpr std::chrono::milliseconds s_sleep_duration{100};
763
764 /** Maximum queue size, defaults to 4 */
766
767 /** Number of pages flushed. */
769
770 /** Bulk flusher is specific to a tablespace for now. */
772
773 /** Flusher ID. */
774 size_t m_id{};
775
776#ifdef UNIV_DEBUG
777 public:
778 /** Vector of page numbers that are flushed by this Bulk_flusher object. */
779 std::vector<page_no_t> m_flushed_page_nos;
780#endif /* UNIV_DEBUG */
781};
782
783namespace bulk {
784
785class Blob_handle;
786
787/** Used to insert many blobs into InnoDB. */
789 public:
790 /** Constructor.
791 @param[in] btree_load the B-tree into which blobs are inserted. */
792 Blob_inserter(Btree_load &btree_load);
793
795
796 /** Initialize by allocating necessary resources.
797 @return DB_SUCCESS on success or a failure error code. */
798 dberr_t init();
799
800 void finish();
801
803 Blob_context blob_ctx;
804 dberr_t err = open_blob(blob_ctx, ref);
805 if (err != DB_SUCCESS) {
806 return err;
807 }
808 const byte *data = (const byte *)dfield->data;
809 err = write_blob(blob_ctx, ref, data, dfield->len);
810 if (err != DB_SUCCESS) {
811 return err;
812 }
813 return close_blob(blob_ctx, ref);
814 }
815
816 /** Create a blob.
817 @param[out] blob_ctx pointer to an opaque object representing a blob.
818 @param[out] ref blob reference to be placed in the record.
819 @return DB_SUCCESS on success or a failure error code. */
821
822 /** Write data into the blob.
823 @param[in] blob_ctx pointer to blob into which data is written.
824 @param[out] ref blob reference to be placed in the record.
825 @param[in] data buffer containing data to be written
826 @param[in] len length of the data to be written.
827 @return DB_SUCCESS on success or a failure error code. */
828 dberr_t write_blob(Blob_context blob_ctx, lob::ref_t &ref, const byte *data,
829 size_t len);
830
831 /** Indicate that the blob has been completed, so that resources can be
832 removed, and as necessary flushing can be done.
833 @param[in] blob_ctx pointer to blob which has been completely written.
834 @param[out] ref a blob ref object.
835 @return DB_SUCCESS on success or a failure error code. */
837
838 /** Allocate a LOB first page
839 @return a LOB first page. */
841
842 /** Allocate a data page
843 @return a LOB data page. */
845
846 /** Allocate a LOB index page.
847 @return a LOB index page. */
849
850 /** Get the current transaction id.
851 @return the current transaction id. */
852 trx_id_t get_trx_id() const;
853
854 private:
856
858
860
861 /** Page extent from which to allocate first pages of blobs.
862 @ref lob::bulk::first_page_t. */
864
866
867 /** Page extent from which to allocate data pages of blobs.
868 @ref lob::bulk::data_page_t. */
870
871 /** Page extent from which to allocate index pages of blobs.
872 @ref lob::bulk::node_page_t. */
873 std::list<Page_extent *> m_index_extents;
874
875 /** The current blob being inserted. */
877
878 /** Cache of Page_load objects. */
880
881 /** Cache of Page_extent objects. */
883
884 /** Only one blob handle per sub-tree */
886};
887
888} /* namespace bulk */
889
890/** @note We should call commit(false) for a Page_load object, which is not in
891m_page_loaders after page_commit, and we will commit or abort Page_load
892objects in function "finish". */
894 public:
895 /** Merge multiple Btree_load sub-trees together. */
896 class Merger;
897
899 return m_blob_inserter.insert_blob(ref, dfield);
900 }
901
902 /** Create a blob.
903 @param[out] blob_ctx pointer to an opaque object representing a blob.
904 @param[out] ref blob reference to be placed in the record.
905 @return DB_SUCCESS on success or a failure error code. */
907 return m_blob_inserter.open_blob(blob_ctx, ref);
908 }
909
910 /** Write data into the blob.
911 @param[in] blob_ctx pointer to blob into which data is written.
912 @param[in,out] ref blob reference of the current blob
913 @param[in] data buffer containing data to be written
914 @param[in] len length of the data to be written.
915 @return DB_SUCCESS on success or a failure error code. */
916 dberr_t write_blob(Blob_context blob_ctx, lob::ref_t &ref, const byte *data,
917 size_t len) {
918 return m_blob_inserter.write_blob(blob_ctx, ref, data, len);
919 }
920
921 /** Indicate that the blob has been completed, so that resources can be
922 removed, and as necessary flushing can be done.
923 @param[in] blob_ctx pointer to blob which has been completely written.
924 @param[out] ref blob reference of the closed blob.
925 @return DB_SUCCESS on success or a failure error code. */
927 return m_blob_inserter.close_blob(blob_ctx, ref);
928 }
929
930 public:
931 using Page_loaders = std::vector<Page_load *, ut::allocator<Page_load *>>;
932 using Level_ctxs = std::vector<Level_ctx *, ut::allocator<Level_ctx *>>;
933
934 /** Helper to set wait callbacks for the current scope. */
936 public:
937 using Function = std::function<void()>;
938 friend class Btree_load;
939
941 : m_btree_load(btree_load) {
944 }
945
947 m_btree_load->m_fn_wait_begin = nullptr;
948 m_btree_load->m_fn_wait_end = nullptr;
949 }
950
951 private:
952 /** Btree Load for the wait callbacks. */
954 };
955
956 /** Constructor
957 @param[in] index B-tree index.
958 @param[in] trx Transaction object.
959 @param[in] loader_num loader index
960 @param[in] flush_queue_size bulk flusher queue size
961 @param[in] allocator extent allocator */
962 Btree_load(dict_index_t *index, trx_t *trx, size_t loader_num,
963 size_t flush_queue_size,
964 Bulk_extent_allocator &allocator) noexcept;
965
966 /** Destructor */
967 ~Btree_load() noexcept;
968
969 /** Initialize. Allocates the m_heap_order memory heap.
970 @return DB_SUCCESS on success or an error code on failure. */
971 dberr_t init();
972
973#ifdef UNIV_DEBUG
974 /** Save flushed page numbers for debugging purposes.
975 @param[in] page_no page number of the page that is flushed. */
977 m_bulk_flusher.m_flushed_page_nos.push_back(page_no);
978 }
979#endif /* UNIV_DEBUG */
980
981 /** Check if the index build operation has been interrupted.
982 @return true if the index build operation is interrupted, false otherwise.*/
983 bool is_interrupted() const;
984
985 /** Trigger flusher thread and check for error.
986 @return Innodb error code. */
988
989 /** Get the index object.
990 @return index object. */
991 dict_index_t *index() const { return m_index; }
992
993 const char *get_table_name() const { return m_index->table->name.m_name; }
994
995 /** Get the root page number of this tree/subtree.
996 @return the root page number of this tree/subtree. */
997 page_no_t get_subtree_root() const { return m_first_page_nos.back(); }
998
999 /** Get the level of the root page.
1000 @return the level of the root page. */
1001 size_t get_root_level() const { return m_root_level; }
1002
1003 /** Get information about root page. */
1004 void get_root_page_stat(Page_stat &stat);
1005
1006 /** Get the transaction id.
1007 @return the transaction id. */
1008 trx_id_t get_trx_id() const;
1009
1010 /** Btree bulk load finish. We commit the last page in each level
1011 and copy the last page in top level to the root page of the index
1012 if no error occurs.
1013 @param[in] is_err Whether bulk load was successful until now
1014 @param[in] subtree true if a subtree is being built, false otherwise.
1015 @return error code */
1016 [[nodiscard]] dberr_t finish(bool is_err, const bool subtree) noexcept;
1017
1018 /** Insert a tuple to a page in a level
1019 @param[in] dtuple Tuple to insert
1020 @param[in] level B-tree level
1021 @return error code */
1022 [[nodiscard]] dberr_t insert(dtuple_t *dtuple, size_t level) noexcept;
1023
1024 /** Split the right most block of the tree at the given level.
1025 @param[in,out] block the right most block at the given level.
1026 @param[in] level level of the given block.
1027 @param[in] node_ptr node pointer to be inserted in the block after
1028 splitting.
1029 @param[in] mtr mini transaction context.
1030 @param[in,out] highest_level highest level among all the subtrees.*/
1031 void split_rightmost(buf_block_t *block, size_t level, dtuple_t *node_ptr,
1032 mtr_t *mtr, size_t &highest_level);
1033
1034 /** Split the left most block of the tree at the given level.
1035 @param[in,out] block the left most block at the given level. it will be
1036 updated with the new left most block.
1037 @param[in] level level of the given block.
1038 @param[in] node_ptr node pointer to be inserted in the block after
1039 splitting.
1040 @param[in] mtr mini transaction context.
1041 @param[in,out] highest_level highest level among all the subtrees.*/
1042 void split_leftmost(buf_block_t *&block, size_t level, dtuple_t *node_ptr,
1043 mtr_t *mtr, size_t &highest_level);
1044
1045 private:
1046 /** Set the root page on completion.
1047 @param[in] last_page_no Last page number (the new root).
1048 @return DB_SUCCESS or error code. */
1049 [[nodiscard]] dberr_t load_root_page(page_no_t last_page_no) noexcept;
1050
1051 public:
1052 /** Commit(finish) a page. We set next/prev page no, insert a node pointer to
1053 father page if needed, and commit mini-transaction.
1054 @param[in] page_load Page to commit
1055 @param[in] next_page_load Next page
1056 @param[in] insert_father Flag whether need to insert node ptr
1057 @return error code */
1058 [[nodiscard]] dberr_t page_commit(Page_load *page_load,
1059 Page_load *next_page_load,
1060 bool insert_father) noexcept;
1061
1062 /** Prepare space to insert a tuple.
1063 @param[in,out] page_load Page bulk that will be used to store the record.
1064 It may be replaced if there is not enough space
1065 to hold the record.
1066 @param[in] level B-tree level
1067 @param[in] rec_size Record size
1068 @return error code */
1069 [[nodiscard]] dberr_t prepare_space(Page_load *&page_load, size_t level,
1070 size_t rec_size) noexcept;
1071
1072 /** Insert a tuple to a page.
1073 @param[in] page_load Page bulk object
1074 @param[in] tuple Tuple to insert
1075 @param[in] big_rec Big record vector, maybe NULL if there is no
1076 Data to be stored externally.
1077 @param[in] rec_size Record size
1078 @return error code */
1079 [[nodiscard]] dberr_t insert(Page_load *page_load, dtuple_t *tuple,
1080 big_rec_t *big_rec, size_t rec_size) noexcept;
1081
1082 /** Btree page bulk load finish. Commits the last page in each level
1083 if no error occurs. Also releases all page bulks.
1084 @param[in] is_err Whether bulk load was successful until now
1085 @param[out] last_page_no Last page number
1086 @return error code */
1087 [[nodiscard]] dberr_t finalize_page_loads(bool is_err,
1088 page_no_t &last_page_no) noexcept;
1089
1090 public:
1091 /** Allocate an extent.
1092 @param[in,out] page_range the range of pages allocated.
1093 @param[in] level btree level for which pages are allocated.
1094 @return status code. */
1095 dberr_t alloc_extent(Page_range_t &page_range, size_t level);
1096
1097 /** Initiate a direct file write operation.
1098 @param[in] block block to be written to disk.
1099 @return error code. */
1100 [[nodiscard]] dberr_t fil_io(buf_block_t *block) noexcept;
1101
1102 /** Flush the blob pages.
1103 @return status code. */
1104 [[nodiscard]] dberr_t flush_blobs() noexcept;
1105
1106  /** Add the given block to the internal cache of blocks.
1107 @param[in] block the block to be cached. */
1108 inline void block_put(buf_block_t *block);
1109
1110 /** Remove the given block from the internal cache of blocks.
1111 @param[in] page_no the page number of block to be removed from cache. */
1112 inline void block_remove(const page_no_t page_no);
1113
1114 /** Search for a BUF_BLOCK_MEMORY block with given page number in the local
1115 cache.
1116 @param[in] page_no the page number of block to be fetched.
1117 @return buffer block with given page number. */
1118 [[nodiscard]] inline buf_block_t *block_get(page_no_t page_no) const noexcept;
1119
1120 /** Evict all the pages in the given range from the buffer pool.
1121 @param[in] range range of page numbers.
1122 @param[in] dirty_is_ok it is OK for a page to be dirty. */
1123 void force_evict(const Page_range_t &range, const bool dirty_is_ok = true);
1124
1125 public:
1126 /** Check if a new level is needed. */
1127 bool is_new_level(size_t level) const { return level >= m_level_ctxs.size(); }
1128
1129 /** Last page numbers of each level. */
1130 std::vector<page_no_t, ut::allocator<page_no_t>> m_last_page_nos{};
1131
1132 /** First page numbers of each level. */
1133 std::vector<page_no_t, ut::allocator<page_no_t>> m_first_page_nos{};
1134
1135 /** Get the level context object.
1136 @param[in] level the level number. level 0 is leaf level.
1137 @return the level context object. */
1138 Level_ctx *get_level(size_t level) const;
1139
1140 /** Page numbers of the pages that has been allocated in the leaf level.
1141 The page range is [p1, p2), where p2 is not included. */
1143
1144 /** Page numbers of the pages that has been allocated in the non-leaf level.
1145 The page range is [p1, p2), where p2 is not included. */
1147
1150
1151 /** State of the index. Used for asserting at the end of a
1152 bulk load operation to ensure that the online status of the
1153 index does not change */
1155
1156 /** Number of extents allocated for this B-tree. */
1158
1159 /** Number of pages allocated for this B-tree. */
1161
1162 public:
1163 std::ostream &print_left_pages(std::ostream &out) const;
1164 std::ostream &print_right_pages(std::ostream &out) const;
1165
1166 dberr_t check_key_overlap(const Btree_load *r_btree) const;
1167
1168#ifdef UNIV_DEBUG
1169 void print_tree_pages() const;
1170 std::string print_pages_in_level(const size_t level) const;
1171 /** Check size and validate index of limited size.
1172 @param[in] index Index to validate
1173 @return true if successful. */
1174 static bool validate_index(dict_index_t *index);
1175#endif /* UNIV_DEBUG */
1176
1177 /** All allocated extents registers with Btree_load. */
1178 void track_extent(Page_extent *page_extent);
1179
1180 /** Add fully used extents to the bulk flusher. Call this whenever a new
1181 Page_load is allocated, with finish set to false. Only in
1182 Btree_load::finish(), the finish argument will be true.
1183 @param[in] finish if true, add all the tracked extents to the bulk flusher,
1184 irrespective of whether it is fully used or not. */
1185 void add_to_bulk_flusher(bool finish = false);
1186
1187 /** Add the given page extent object to the bulk flusher.
1188 @param[in] page_extent the extent to be flushed. */
1189 void add_to_bulk_flusher(Page_extent *page_extent);
1190
1191 /** Check if transparent page compression (TPC) is enabled.
1192 @return true if TPC is enabled. */
1193 bool is_tpc_enabled() const;
1194
1195 /** Check if transparent page encryption (TPE) is enabled.
1196 @return true if TPE is enabled. */
1197 bool is_tpe_enabled() const;
1198
1199 /** @return get flush queue size limit. */
1202 }
1203
1204 /** If the data is already sorted and checked for duplicates, then we can
1205 disable doing it again. */
1207
1208 private:
1209 /** Page allocation type. We allocate in extents by default. */
1212
1213 /** Number of records inserted. */
1214 uint64_t m_n_recs{};
1215
1216 /** B-tree index */
1218
1220
1221 /** Transaction id */
1223
1224 /** Root page level */
1226
1227 private:
1228 /** Context information for each level of the B-tree. The leaf level is at
1229 m_level_ctxs[0]. */
1231
1232 /** Reference to global extent allocator. */
1234
1235 /** Extents that are being tracked. */
1236 std::list<Page_extent *> m_extents_tracked;
1237
1238 /** If true, check if data is inserted in sorted order. */
1239 bool m_check_order{true};
1240
1241 /** Memory heap to be used for sort order checks. */
1243
1244 /** Function object to compare two tuples. */
1246
1247 /** The previous tuple that has been inserted. */
1249
1250 bool is_extent_tracked(const Page_extent *page_extent) const;
1251
1252 /** Loader number. */
1254
1256
1257 /* Begin wait callback function. */
1259
1260 /* End wait callback function. */
1262
1263 /** Blob inserter that will be used to handle all the externally stored
1264 fields of InnoDB. */
1266
1267 /* Dedicated thread to flush pages. */
1269
1271};
1272
1274 public:
1275 using Btree_loads = std::vector<Btree_load *, ut::allocator<Btree_load *>>;
1276
1278 : m_btree_loads(loads),
1279 m_index(index),
1280 m_trx(trx),
1282
1283 dberr_t merge(bool sort);
1284
1285 private:
1286 /** Get the maximum free space available in an empty page in bytes.
1287 @return the maximum free space available in an empty page. */
1288 size_t get_max_free() const {
1290 }
1291
1292 /** Remove any empty sub-trees with no records. */
1293 void remove_empty_subtrees();
1294
1295#ifdef UNIV_DEBUG
1296 /** Validate sub-tree boundaries. */
1297 void validate_boundaries();
1298
1299#endif /* UNIV_DEBUG */
1300
1301  /** Stitch sub-trees together to form a tree with one or multiple
1302  nodes at the highest level.
1303 @param[out] highest_level highest level of the merged tree.
1304 @return innodb error code. */
1305 dberr_t subtree_link_levels(size_t &highest_level);
1306
1307  /** Create root node for the stitched sub-trees by combining the nodes
1308 at highest level creating another level if required.
1309 @param[in] highest_level highest level of the merged tree.
1310 @return innodb error code. */
1311 dberr_t add_root_for_subtrees(const size_t highest_level);
1312
1313 /** Insert the given list of node pointers into pages at the given level.
1314 @param[in,out] all_node_ptrs list of node pointers
1315 @param[in,out] total_node_ptrs_size total space in bytes needed to insert
1316 all the node pointers.
1317 @param[in] level the level at which the node pointers are inserted.
1318 @return DB_SUCCESS if successful.
1319 @return error code on failure. */
1320 dberr_t insert_node_ptrs(std::vector<dtuple_t *> &all_node_ptrs,
1321 size_t &total_node_ptrs_size, size_t level);
1322
1323 /** Load the left page and update its FIL_PAGE_NEXT.
1324 @param[in] l_page_no left page number
1325 @param[in] r_page_no right page number. */
1326 void link_right_sibling(const page_no_t l_page_no, const page_no_t r_page_no);
1327
1328 private:
1329  /** Reference to the subtrees to be merged. */
1331
1332 /** Index which is being built. */
1334
1335 /** Transaction making the changes. */
1337
1338 /** Memory heap to store node pointers. */
1340};
1341
1343 const Page_extent *page_extent) const {
1344 for (auto e : m_extents_tracked) {
1345 if (page_extent == e) {
1346 return true;
1347 }
1348 }
1349 return false;
1350}
1351
1352/** The proper function call sequence of Page_load is as below:
1353-- Page_load::init
1354-- Page_load::insert
1355-- Page_load::finish
1356-- Page_load::commit */
1358 public:
1360
1361 /** Ctor.
1362 @param[in] index B-tree index
1363 @param[in] btree_load btree object to which this page belongs. */
1364 Page_load(dict_index_t *index, Btree_load *btree_load);
1365
1366 /** Destructor. */
1367 ~Page_load() noexcept;
1368
1369 /** Check if page is corrupted.
1370 @return true if corrupted, false otherwise. */
1371 bool is_corrupted() const;
1372
1373 /** Print the child page numbers. */
1374 void print_child_page_nos() noexcept;
1375
1376 /** Check if state of this page is BUF_BLOCK_MEMORY.
1377 @return true if page state is BUF_BLOCK_MEMORY, false otherwise.*/
1378 bool is_memory() const { return m_block->is_memory(); }
1379
1380 /** A static member function to create this object.
1381 @param[in] btree_load the bulk load object to which this Page_load belongs.
1382 @param[in] page_extent page extent to which this page belongs. */
1383 static Page_load *create(Btree_load *btree_load, Page_extent *page_extent);
1384
1385 /** Release the page loader. Delete if not cached.
1386 @param[in] page_load page loader to delete. */
1387 static void drop(Page_load *page_load);
1388
1389 /** Constructor
1390 @param[in] index B-tree index
1391 @param[in] trx_id Transaction id
1392 @param[in] page_no Page number
1393 @param[in] level Page level
1394 @param[in] observer Flush observer
1395 @param[in] btree_load btree object to which this page belongs. */
1397 size_t level, Flush_observer *observer,
1398 Btree_load *btree_load = nullptr) noexcept
1399 : m_index(index),
1400 m_trx_id(trx_id),
1401 m_page_no(page_no),
1402 m_level(level),
1404 m_flush_observer(observer),
1405 m_btree_load(btree_load) {
1407 }
1408
1409 /** Set the transaction id.
1410 @param[in] trx_id the transaction id to used. */
1411 void set_trx_id(const trx_id_t trx_id) { m_trx_id = trx_id; }
1412
1413 /** Get the current transaction identifier.
1414 @return the current transaction identifier.*/
1415 trx_id_t get_trx_id() const { return m_trx_id; }
1416
1417 /** Set the flush observer.
1418 @param[in] observer the flush observer object to use. */
1420 m_flush_observer = observer;
1421 }
1422
1423 bool is_leaf() const { return m_level == 0; }
1424
1425 /** Set the page number of this object. */
1426 void set_page_no(const page_no_t page_no);
1427
1428 void set_leaf_seg(const fseg_header_t *hdr) {
1430 }
1431 void set_top_seg(const fseg_header_t *hdr) {
1433 }
1434
1435 /** Initialize members and allocate page if needed and start mtr.
1436 @note Must be called and only once right after constructor.
1437 @return error code */
1438 [[nodiscard]] dberr_t init() noexcept;
1439 [[nodiscard]] dberr_t init_mem(const page_no_t new_page_no,
1440 Page_extent *page_extent) noexcept;
1441
1442 /** Initialize a memory block to be used for storing blobs.
1443 @param[in] page_no the page number to be set in the memory block.
1444 @param[in] page_extent extent to which this page belongs.
1445 @return DB_SUCCESS on success, error code on failure.*/
1446 [[nodiscard]] dberr_t init_mem_blob(const page_no_t page_no,
1447 Page_extent *page_extent) noexcept;
1448
1449 /** Allocate a page for this Page_load object.
1450 @return DB_SUCCESS on success, error code on failure. */
1451 dberr_t alloc() noexcept;
1452
1453 /** Re-initialize this page. */
1454 [[nodiscard]] dberr_t reinit() noexcept;
1455
1456 /** Reset this object so that Page_load::init() can be called again on this
1457 object. */
1458 void reset() noexcept;
1459
1460 /** Insert a tuple in the page.
1461 @param[in] tuple Tuple to insert
1462 @param[in] big_rec External record
1463 @param[in] rec_size Record size
1464 @return error code */
1465 [[nodiscard]] dberr_t insert(const dtuple_t *tuple, const big_rec_t *big_rec,
1466 size_t rec_size) noexcept;
1467
1468 /** Mark end of insertion to the page. Scan records to set page dirs,
1469 and set page header members. The scan is incremental (slots and records
1470  whose assignment could be "finalized" are not checked again). Check the
1471 m_slotted_rec_no usage, note it could be reset in some cases like
1472 during split.
1473 Note: we refer to page_copy_rec_list_end_to_created_page.*/
1474 void finish() noexcept;
1475
1476 /** Commit mtr for a page
1477 @return DB_SUCCESS on success, error code on failure. */
1478 dberr_t commit() noexcept;
1479
1480 /** Commit mtr for a page */
1481 void rollback() noexcept;
1482
1483 /** Check whether the record needs to be stored externally.
1484 @return false if the entire record can be stored locally on the page */
1485 [[nodiscard]] bool need_ext(const dtuple_t *tuple,
1486 size_t rec_size) const noexcept;
1487
1488 /** Store externally the first possible field of the given tuple.
1489 @return true if a field was stored externally, false if it was not possible
1490 to store any of the fields externally. */
1491 [[nodiscard]] bool make_ext(dtuple_t *tuple);
1492
1493 /** Get node pointer
1494 @return node pointer */
1495 [[nodiscard]] dtuple_t *get_node_ptr() noexcept;
1496
1497 /** Get node pointer
1498 @param[in] heap allocate node pointer in the given heap.
1499 @return node pointer */
1500 [[nodiscard]] dtuple_t *get_node_ptr(mem_heap_t *heap) noexcept;
1501
1502 /** Copy all records from page.
1503 @param[in] src_page Page with records to copy. */
1504 size_t copy_all(const page_t *src_page) noexcept;
1505
1506 /** Distribute all records from this page to the given pages.
1507 @param[in,out] to_pages array of Page_load objects.
1508 return total number of records processed. */
1509 size_t copy_to(std::vector<Page_load *> &to_pages);
1510
1511 /** Set next page
1512 @param[in] next_page_no Next page no */
1513 void set_next(page_no_t next_page_no) noexcept;
1514
1515 /** Set previous page
1516 @param[in] prev_page_no Previous page no */
1517 void set_prev(page_no_t prev_page_no) noexcept;
1518
1519 /** Get previous page (FIL_PAGE_PREV). */
1520 page_no_t get_prev() noexcept;
1521
1522 /** Start mtr and latch block */
1523 void latch() noexcept;
1524
1525 /** Check if required space is available in the page for the rec
1526 to be inserted. We check fill factor & padding here.
1527 @param[in] rec_size Required space
1528 @return true if space is available */
1529 [[nodiscard]] inline bool is_space_available(size_t rec_size) const noexcept;
1530
1531 /** Get the page number of this page load object.
1532 @return the page number of this page load object. */
1533 [[nodiscard]] page_no_t get_page_no() const noexcept { return m_page_no; }
1534
1535 [[nodiscard]] page_id_t get_page_id() const noexcept {
1536 return m_block->page.id;
1537 }
1538
1539 /** Get the physical page size of the underlying tablespace.
1540 @return the physical page size of the tablespace. */
1541 size_t get_page_size() const noexcept;
1542
1543 /** Get the table space ID.
1544 @return the table space ID. */
1545 space_id_t space() const noexcept;
1546
1547#ifdef UNIV_DEBUG
1548 /** Obtain tablespace id from the frame and the buffer block and ensure that
1549 they are the same.
1550 @return true if space id is same in both places. */
1551 bool verify_space_id() const;
1552#endif /* UNIV_DEBUG */
1553
1554 /** Get page level */
1555 [[nodiscard]] size_t get_level() const noexcept { return m_level; }
1556
1557 /** Set the level of this page. */
1558 void set_level(size_t level) noexcept { m_level = level; }
1559
1560 /** Get record no */
1561 [[nodiscard]] size_t get_rec_no() const { return m_rec_no; }
1562
1563 /** Get page */
1564 [[nodiscard]] const page_t *get_page() const noexcept {
1566 }
1567
1568 [[nodiscard]] page_t *get_page() noexcept {
1570 }
1571
1572 public:
1573 void init_for_writing();
1574 size_t get_data_size() const { return page_get_data_size(m_page); }
1575
1576#ifdef UNIV_DEBUG
1577 /** Check if index is X locked
1578 @return true if index is locked. */
1579 bool is_index_locked() noexcept;
1580#endif /* UNIV_DEBUG */
1581
1582 /** Copy given and all following records.
1583 @param[in] first_rec First record to copy */
1584 size_t copy_records(const rec_t *first_rec) noexcept;
1585
1586 /** Insert a record in the page, check for duplicates too.
1587 @param[in] rec Record
1588 @param[in] offsets Record offsets
1589 @return DB_SUCCESS or error code. */
1590 dberr_t insert(const rec_t *rec, Rec_offsets offsets) noexcept;
1591
1592 public:
1593 /** Store external record
1594 Since the record is not logged yet, so we don't log update to the record.
1595 the blob data is logged first, then the record is logged in bulk mode.
1596 @param[in] big_rec External record
1597 @param[in] offsets Record offsets
1598 @return error code */
1599 [[nodiscard]] dberr_t store_ext(const big_rec_t *big_rec,
1600 Rec_offsets offsets) noexcept;
1601
1602 /** Set the REC_INFO_MIN_REC_FLAG on the first user record in this page.
1603 @param[in] mtr mini transaction context. */
1604 void set_min_rec_flag(mtr_t *mtr);
1605
1606 /** Set the REC_INFO_MIN_REC_FLAG on the first user record in this page. */
1607 void set_min_rec_flag();
1608 bool is_min_rec_flag() const;
1609
1610 /** Set the level context object for this page load
1611 @param[in] level_ctx the level context object. */
1612 void set_level_ctx(Level_ctx *level_ctx) { m_level_ctx = level_ctx; }
1613
1614 /** Check if this page load object contains a level context object.
1615 @return true if the page load contains a level context object.
1616 @return false if the page load does NOT contain a level context object.*/
1617 bool has_level_ctx() const { return m_level_ctx != nullptr; }
1618
1619 /** Free the memory block. */
1620 void free();
1621
1623
1625
1626 void set_page_extent(Page_extent *page_extent) {
1627 m_page_extent = page_extent;
1628 }
1629
1630 /** Mark the Page load as cached. Flush thread should not free this Page. */
1631 void set_cached() { m_is_cached.store(true); }
1632
1633 /** @return true iff it is a cached Page Load. */
1634 bool is_cached() const { return m_is_cached.load(); }
1635
1636 private:
1637 /** Memory heap for internal allocation */
1639
1640 /** The index B-tree */
1642
1643  /** The mini-transaction */
1645
1646 /** The transaction id */
1648
1649 /** The buffer block */
1651
1652 /** The page */
1654
1655 /** The current rec, just before the next insert rec */
1657
1658 /** The page no */
1660
1661 /** The page level in B-tree */
1662 size_t m_level{};
1663
1664 /** Flag: is page in compact format */
1665 const bool m_is_comp{};
1666
1667 /** The heap top in page for next insert */
1668 byte *m_heap_top{};
1669
1670 /** User record no */
1671 size_t m_rec_no{};
1672
1673 /** The free space left in the page */
1675
1676 /** The reserved space for fill factor */
1678
1679 /** Total data in the page */
1681
1682 /** The modify clock value of the buffer block
1683 when the block is re-pinned */
1684 uint64_t m_modify_clock{};
1685
1686 /** Flush observer */
1688
1689 /** Last record assigned to a slot. */
1691
1692 /** Number of records assigned to slots. */
1694
1695 /** Page modified flag. */
1697
1699
1701
1703
1704  /** true iff the Page load is cached. */
1705 std::atomic_bool m_is_cached{false};
1706
1707 friend class Btree_load;
1708};
1709
1711 return get_node_ptr(m_heap);
1712}
1713
1714inline space_id_t Page_load::space() const noexcept { return m_index->space; }
1715
1716inline size_t Page_load::get_page_size() const noexcept {
1717 const page_size_t page_size = m_index->get_page_size();
1718 return page_size.physical();
1719}
1720
1721inline Level_ctx *Btree_load::get_level(size_t level) const {
1722 ut_a(m_level_ctxs.size() > level);
1723 return m_level_ctxs[level];
1724}
1725
1726/** Information about a buffer page. */
1728 /** Number of user records in the page. */
1729 size_t m_n_recs;
1730
1731 /** Number of bytes of data. */
1733};
1734
1735inline void Page_extent::append(Page_load *page_load) {
1736 ut_ad(page_load->get_block() != nullptr);
1737 ut_ad(page_load->is_memory());
1738 ut_ad(page_load->get_page_no() >= m_range.first);
1739 ut_ad(page_load->get_page_no() < m_range.second);
1740 for (auto &iter : m_page_loads) {
1741 if (iter->get_page_no() == page_load->get_page_no()) {
1742 /* Page already appended. Don't append again. */
1743 return;
1744 }
1745 }
1747 m_page_loads.push_back(page_load);
1748}
1749
1751 return m_btree_load->get_trx_id();
1752}
1753
1755 return m_btree_load->index()->space;
1756}
1757
1758inline Page_extent::Page_extent(Btree_load *btree_load, const bool is_leaf)
1759 : m_page_no(FIL_NULL),
1760 m_range(FIL_NULL, FIL_NULL),
1761 m_btree_load(btree_load),
1762 m_is_leaf(is_leaf) {
1764}
1765
1767 const bool is_leaf, bool skip_track) {
1768 Page_extent *p = ut::new_withkey<Page_extent>(UT_NEW_THIS_FILE_PSI_KEY,
1769 btree_load, is_leaf);
1770 if (!skip_track) {
1771 btree_load->track_extent(p);
1772 }
1773 p->m_is_cached.store(false);
1774 return p;
1775}
1776
1777inline void Page_extent::drop(Page_extent *extent) {
1778 if (extent == nullptr) {
1779 return;
1780 }
1781 if (extent->is_cached()) {
1782 ut_a(!extent->is_free());
1783 bool free = true;
1784 extent->set_state(free);
1785 return;
1786 }
1787 ut::delete_(extent);
1788}
1789
1790/** Function object to compare two Btree_load objects. */
1793 bool operator()(const Btree_load *l_btree, const Btree_load *r_btree);
1795};
1796
1797#ifdef UNIV_DEBUG
1800#endif /* UNIV_DEBUG */
1801
1803 for (auto page_load : m_page_loads) {
1804 page_load->free();
1805 }
1806}
1807
1808namespace bulk {
1810 return m_btree_load.get_trx_id();
1811}
1812} /* namespace bulk */
1813
1814} /* namespace Btree_multi */
1815
1816#endif /* btr0mtib_h */
uint32_t space_id_t
Tablespace identifier.
Definition: api0api.h:48
uint32_t page_no_t
Page number.
Definition: api0api.h:46
Kerberos Client Authentication nullptr
Definition: auth_kerberos_client_plugin.cc:251
std::pair< page_no_t, page_no_t > Page_range_t
Definition: btr0btr.h:131
The B-tree bulk load.
static buf_frame_t * buf_block_get_frame(const buf_block_t *block)
Gets a pointer to the memory frame of a block.
Definition: btr0load.h:51
Definition: btr0mtib.h:1273
Btree_loads & m_btree_loads
Refernce to the subtrees to be merged.
Definition: btr0mtib.h:1330
std::vector< Btree_load *, ut::allocator< Btree_load * > > Btree_loads
Definition: btr0mtib.h:1275
dict_index_t * m_index
Index which is being built.
Definition: btr0mtib.h:1333
void validate_boundaries()
Validate sub-tree boundaries.
Definition: btr0mtib.cc:2707
dberr_t insert_node_ptrs(std::vector< dtuple_t * > &all_node_ptrs, size_t &total_node_ptrs_size, size_t level)
Insert the given list of node pointers into pages at the given level.
Definition: btr0mtib.cc:3173
size_t get_max_free() const
Get the maximum free space available in an empty page in bytes.
Definition: btr0mtib.h:1288
Merger(Btree_loads &loads, dict_index_t *index, trx_t *trx)
Definition: btr0mtib.h:1277
void remove_empty_subtrees()
Remove any empty sub-trees with no records.
Definition: btr0mtib.cc:2694
dberr_t add_root_for_subtrees(const size_t highest_level)
Create root node for the stiched sub-trees by combining the nodes at highest level creating another l...
Definition: btr0mtib.cc:3015
Scoped_heap m_tuple_heap
Memory heap to store node pointers.
Definition: btr0mtib.h:1339
dberr_t merge(bool sort)
Definition: btr0mtib.cc:2640
trx_t * m_trx
Transaction making the changes.
Definition: btr0mtib.h:1336
void link_right_sibling(const page_no_t l_page_no, const page_no_t r_page_no)
Load the left page and update its FIL_PAGE_NEXT.
Definition: btr0mtib.cc:3145
dberr_t subtree_link_levels(size_t &highest_level)
Stich sub-trees together to form a tree with one or multiple nodes at highest leve.
Definition: btr0mtib.cc:2717
Helper to set wait callbacks for the current scope.
Definition: btr0mtib.h:935
~Wait_callbacks()
Definition: btr0mtib.h:946
Wait_callbacks(Btree_load *btree_load, Function &begin, Function &end)
Definition: btr0mtib.h:940
Btree_load * m_btree_load
Btree Load for the wait callbacks.
Definition: btr0mtib.h:953
std::function< void()> Function
Definition: btr0mtib.h:937
Definition: btr0mtib.h:893
~Btree_load() noexcept
Destructor.
Definition: btr0mtib.cc:1646
dict_index_t * m_index
B-tree index.
Definition: btr0mtib.h:1217
Bulk_flusher m_bulk_flusher
Definition: btr0mtib.h:1268
dberr_t finish(bool is_err, const bool subtree) noexcept
Btree bulk load finish.
Definition: btr0mtib.cc:1984
dberr_t load_root_page(page_no_t last_page_no) noexcept
Set the root page on completion.
Definition: btr0mtib.cc:1860
dberr_t trigger_flusher() const
Trigger flusher thread and check for error.
Definition: btr0mtib.h:987
bool is_tpe_enabled() const
Check if transparent page encryption (TPE) is enabled.
Definition: btr0mtib.cc:2632
Bulk_extent_allocator & m_allocator
Reference to global extent allocator.
Definition: btr0mtib.h:1233
bool is_new_level(size_t level) const
Check if a new level is needed.
Definition: btr0mtib.h:1127
dberr_t check_key_overlap(const Btree_load *r_btree) const
Definition: btr0mtib.cc:3273
Btree_load(dict_index_t *index, trx_t *trx, size_t loader_num, size_t flush_queue_size, Bulk_extent_allocator &allocator) noexcept
Constructor.
Definition: btr0mtib.cc:1629
dict_index_t * index() const
Get the index object.
Definition: btr0mtib.h:991
size_t get_max_flush_queue_size() const
Definition: btr0mtib.h:1200
void block_remove(const page_no_t page_no)
Remove the given block from the internal cache of blocks.
dberr_t insert(dtuple_t *dtuple, size_t level) noexcept
Insert a tuple to a page in a level.
Definition: btr0mtib.cc:1756
dtuple_t * m_prev_tuple
The previous tuple that has been inserted.
Definition: btr0mtib.h:1248
void force_evict(const Page_range_t &range, const bool dirty_is_ok=true)
Evict all the pages in the given range from the buffer pool.
Definition: btr0mtib.cc:2084
std::ostream & print_right_pages(std::ostream &out) const
Definition: btr0mtib.cc:2045
size_t m_root_level
Root page level.
Definition: btr0mtib.h:1225
std::vector< page_no_t, ut::allocator< page_no_t > > m_first_page_nos
First page numbers of each level.
Definition: btr0mtib.h:1133
byte m_fseg_hdr_leaf[FSEG_HEADER_SIZE]
Definition: btr0mtib.h:1148
void block_put(buf_block_t *block)
Add the given block the internal cache of blocks.
dberr_t init()
Initialize.
Definition: btr0mtib.cc:2177
Bulk_extent_allocator::Type m_alloc_type
Page allocation type.
Definition: btr0mtib.h:1210
Page_range_t m_page_range_leaf
Page numbers of the pages that has been allocated in the leaf level.
Definition: btr0mtib.h:1142
void get_root_page_stat(Page_stat &stat)
Get information about root page.
Definition: btr0mtib.cc:2095
size_t m_loader_num
Loader number.
Definition: btr0mtib.h:1253
Level_ctxs m_level_ctxs
Context information for each level of the B-tree.
Definition: btr0mtib.h:1230
page_no_t get_subtree_root() const
Get the root page number of this tree/subtree.
Definition: btr0mtib.h:997
size_t m_stat_n_pages
Number of pages allocated for this B-tree.
Definition: btr0mtib.h:1160
trx_t * m_trx
Transaction id.
Definition: btr0mtib.h:1222
dberr_t flush_blobs() noexcept
Flush the blob pages.
dberr_t open_blob(Blob_context &blob_ctx, lob::ref_t &ref)
Create a blob.
Definition: btr0mtib.h:906
mem_heap_t * m_heap_order
Memory heap to be used for sort order checks.
Definition: btr0mtib.h:1242
dberr_t prepare_space(Page_load *&page_load, size_t level, size_t rec_size) noexcept
Prepare space to insert a tuple.
Definition: btr0mtib.cc:1654
std::list< Page_extent * > m_extents_tracked
Extents that are being tracked.
Definition: btr0mtib.h:1236
void track_page_flush(page_no_t page_no)
Save flushed page numbers for debugging purposes.
Definition: btr0mtib.h:976
std::ostream & print_left_pages(std::ostream &out) const
Definition: btr0mtib.cc:2036
ddl::Compare_key m_compare_key
Function object to compare two tuples.
Definition: btr0mtib.h:1245
bulk::Blob_inserter m_blob_inserter
Blob inserter that will be used to handle all the externally stored fields of InnoDB.
Definition: btr0mtib.h:1265
size_t get_root_level() const
Get the level of the root page.
Definition: btr0mtib.h:1001
dberr_t alloc_extent(Page_range_t &page_range, size_t level)
Allocate an extent.
Definition: btr0mtib.cc:735
bool is_extent_tracked(const Page_extent *page_extent) const
Definition: btr0mtib.h:1342
dberr_t insert_blob(lob::ref_t &ref, const dfield_t *dfield)
Definition: btr0mtib.h:898
bool m_check_order
If true, check if data is inserted in sorted order.
Definition: btr0mtib.h:1239
bool is_tpc_enabled() const
Check if transparent page compression (TPC) is enabled.
Definition: btr0mtib.cc:2622
std::vector< Page_load *, ut::allocator< Page_load * > > Page_loaders
Definition: btr0mtib.h:931
static bool validate_index(dict_index_t *index)
Check size and validate index of limited size.
Definition: btr0mtib.cc:1964
trx_id_t get_trx_id() const
Get the transaction id.
Definition: btr0mtib.cc:1644
void disable_check_order()
If the data is already sorted and checked for duplicates, then we can disable doing it again.
Definition: btr0mtib.h:1206
bool is_interrupted() const
Check if the index build operation has been interrupted.
Definition: btr0mtib.cc:3338
uint64_t m_n_recs
Number of records inserted.
Definition: btr0mtib.h:1214
Wait_callbacks::Function m_fn_wait_begin
Definition: btr0mtib.h:1258
const char * get_table_name() const
Definition: btr0mtib.h:993
void track_extent(Page_extent *page_extent)
All allocated extents registers with Btree_load.
Definition: btr0mtib.cc:2121
void split_rightmost(buf_block_t *block, size_t level, dtuple_t *node_ptr, mtr_t *mtr, size_t &highest_level)
Split the right most block of the tree at the given level.
Definition: btr0mtib.cc:3355
fil_space_t * m_space
Definition: btr0mtib.h:1219
Page_range_t m_page_range_top
Page numbers of the pages that has been allocated in the non-leaf level.
Definition: btr0mtib.h:1146
Wait_callbacks::Function m_fn_wait_end
Definition: btr0mtib.h:1261
void add_to_bulk_flusher(bool finish=false)
Add fully used extents to the bulk flusher.
Definition: btr0mtib.cc:1733
unsigned m_index_online
State of the index.
Definition: btr0mtib.h:1154
byte m_fseg_hdr_top[FSEG_HEADER_SIZE]
Definition: btr0mtib.h:1149
dberr_t page_commit(Page_load *page_load, Page_load *next_page_load, bool insert_father) noexcept
Commit(finish) a page.
Definition: btr0mtib.cc:1588
void split_leftmost(buf_block_t *&block, size_t level, dtuple_t *node_ptr, mtr_t *mtr, size_t &highest_level)
Split the left most block of the tree at the given level.
Definition: btr0mtib.cc:3454
dberr_t fil_io(buf_block_t *block) noexcept
Initiate a direct file write operation.
dberr_t finalize_page_loads(bool is_err, page_no_t &last_page_no) noexcept
Btree page bulk load finish.
Definition: btr0mtib.cc:1828
std::string print_pages_in_level(const size_t level) const
Definition: btr0mtib.cc:1911
void print_tree_pages() const
Definition: btr0mtib.cc:2055
Level_ctx * get_level(size_t level) const
Get the level context object.
Definition: btr0mtib.h:1721
dberr_t close_blob(Blob_context blob_ctx, lob::ref_t &ref)
Indicate that the blob has been completed, so that resources can be removed, and as necessary flushin...
Definition: btr0mtib.h:926
const page_size_t m_page_size
Definition: btr0mtib.h:1255
std::vector< Level_ctx *, ut::allocator< Level_ctx * > > Level_ctxs
Definition: btr0mtib.h:932
buf_block_t * block_get(page_no_t page_no) const noexcept
Search for a BUF_BLOCK_MEMORY block with given page number in the local cache.
std::vector< page_no_t, ut::allocator< page_no_t > > m_last_page_nos
Last page numbers of each level.
Definition: btr0mtib.h:1130
size_t m_stat_n_extents
Number of extents allocated for this B-tree.
Definition: btr0mtib.h:1157
dberr_t write_blob(Blob_context blob_ctx, lob::ref_t &ref, const byte *data, size_t len)
Write data into the blob.
Definition: btr0mtib.h:916
Definition: btr0mtib.h:479
~Bulk_extent_allocator()
Destructor to ensure thread stop.
Definition: btr0mtib.h:489
Extent_cache m_leaf_extents
Cached leaf extents.
Definition: btr0mtib.h:624
dict_table_t * m_table
Innodb dictionary table object.
Definition: btr0mtib.h:645
std::chrono::microseconds m_allocator_wait_time
Total allocator wait time in micro seconds.
Definition: btr0mtib.h:618
size_t m_allocator_wait_count
Number of times allocator had to wait.
Definition: btr0mtib.h:612
size_t m_concurrency
Number of concurrent consumers.
Definition: btr0mtib.h:651
std::condition_variable m_consumer_condition
Condition variable for extent consumer threads.
Definition: btr0mtib.h:636
bool check(size_t &n_leaf, size_t &n_non_leaf, bool &trigger)
Check if leaf and non-leaf extent cache needs to be filled.
Definition: btr0mtib.cc:2480
std::thread m_thread
Bulk extent allocator.
Definition: btr0mtib.h:606
size_t m_consumer_wait_count
Number of times consumer(s) had to wait.
Definition: btr0mtib.h:609
static constexpr size_t S_BULK_EXTEND_SIZE_MAX
Maximum size by which the tablespace is extended each time.
Definition: btr0mtib.h:523
dberr_t allocate(bool is_leaf, bool alloc_page, Page_range_t &range, std::function< void()> &fn_wait_begin, std::function< void()> &fn_wait_end)
Allocate a page range - currently an Extent.
Definition: btr0mtib.cc:2364
dberr_t m_error
Error code, protected by m_mutex.
Definition: btr0mtib.h:642
static constexpr size_t S_MAX_RANGES
Upper bound for max ranges.
Definition: btr0mtib.h:520
trx_t * m_trx
Innodb transaction - used for checking interrupt.
Definition: btr0mtib.h:648
Extent_cache m_non_leaf_extents
Cached non-leaf extents.
Definition: btr0mtib.h:627
void allocator_wait() const
Allocator wait function.
Definition: btr0mtib.cc:2500
dberr_t allocate_extent(bool is_leaf, mtr_t &mtr, Page_range_t &range)
Allocate one extent.
Definition: btr0mtib.cc:2358
bool is_interrupted()
Definition: btr0mtib.cc:2308
dberr_t allocate_page(bool is_leaf, Page_range_t &range)
Allocate one page.
Definition: btr0mtib.cc:2312
dberr_t run()
Extent thread executor.
Definition: btr0mtib.cc:2568
void start()
Definition: btr0mtib.cc:2275
std::mutex m_mutex
This mutex protects the m_queue.
Definition: btr0mtib.h:630
Type m_type
Page range type.
Definition: btr0mtib.h:621
Type
Definition: btr0mtib.h:481
uint64_t init(dict_table_t *table, trx_t *trx, size_t size, size_t num_threads, bool in_pages)
Check size and set extent allocator size parameters.
Definition: btr0mtib.cc:2205
bool m_stop
Flag to indicate if the bulk allocator thread should stop.
Definition: btr0mtib.h:639
std::chrono::microseconds m_consumer_wait_time
Total consumer wait time in micro seconds.
Definition: btr0mtib.h:615
dberr_t allocate_extents(bool is_leaf, size_t num_extents)
Allocate extents and fill the cache.
Definition: btr0mtib.cc:2519
void stop()
Stop extent allocator thread, if active.
Definition: btr0mtib.cc:2283
std::condition_variable m_allocator_condition
Condition variable for allocator thread.
Definition: btr0mtib.h:633
Definition: btr0mtib.h:654
void do_work(fil_node_t *node, void *iov, size_t iov_size)
Do the actual work of flushing.
Definition: btr0mtib.cc:121
dberr_t check_and_notify() const
Check for flusher error and wake up flusher thread.
Definition: btr0mtib.cc:148
dberr_t m_error
Error code, protected by m_mutex.
Definition: btr0mtib.h:742
void add_to_free_queue(Page_extent *page_extent)
Definition: btr0mtib.cc:169
size_t m_pages_flushed
Number of pages flushed.
Definition: btr0mtib.h:768
space_id_t m_space_id
Bulk flusher is specific to a tablespace for now.
Definition: btr0mtib.h:771
std::atomic< bool > m_is_error
Set if error is encountered during flush.
Definition: btr0mtib.h:739
std::atomic< bool > m_stop
Flag to indicate if the bulk flusher thread should stop.
Definition: btr0mtib.h:736
dberr_t get_error() const
Definition: btr0mtib.cc:83
std::vector< Page_extent * > m_free_queue
This queue is protected by the m_free_mutex.
Definition: btr0mtib.h:728
dberr_t run()
Thread main function.
Definition: btr0mtib.cc:220
size_t m_id
Flusher ID.
Definition: btr0mtib.h:774
bool is_error() const
Definition: btr0mtib.h:692
Page_extent * get_free_extent()
Definition: btr0mtib.cc:159
std::mutex m_free_mutex
This mutex protects the m_free_queue.
Definition: btr0mtib.h:731
bool should_i_stop() const
Check if the bulk flush thread should stop working.
Definition: btr0mtib.h:709
size_t m_n_sleep
Number of times slept.
Definition: btr0mtib.h:756
std::mutex m_mutex
This mutex protects the m_queue.
Definition: btr0mtib.h:721
~Bulk_flusher()
Destructor.
Definition: btr0mtib.cc:97
void set_error(dberr_t error_code)
Set error code.
Definition: btr0mtib.cc:88
std::vector< Page_extent * > m_queue
This queue is protected by the m_mutex.
Definition: btr0mtib.h:718
std::thread m_flush_thread
Bulk flusher thread.
Definition: btr0mtib.h:753
size_t m_max_queue_size
Maximum queue size, defaults to 4.
Definition: btr0mtib.h:765
void start(space_id_t space_id, size_t index, size_t queue_size)
Start a new thread to do the flush work.
Definition: btr0mtib.cc:69
std::chrono::microseconds m_wait_time
Total sleep time in micro seconds.
Definition: btr0mtib.h:759
bool is_work_available()
Check if work is available for the bulk flusher thread.
Definition: btr0mtib.cc:204
std::vector< Page_extent * > m_priv_queue
Private queue (private to the bulk flush thread) containing the extents to flush.
Definition: btr0mtib.h:750
void wait_to_stop()
Wait till the bulk flush thread stops.
Definition: btr0mtib.cc:108
std::vector< page_no_t > m_flushed_page_nos
Vector of page numbers that are flushed by this Bulk_flusher object.
Definition: btr0mtib.h:779
size_t get_max_queue_size() const
Get the maximum allowed queue size.
Definition: btr0mtib.h:686
std::condition_variable m_condition
Condition variable to wait upon.
Definition: btr0mtib.h:724
void info()
Print useful information to the server log file while exiting.
Definition: btr0mtib.cc:2190
void wait()
When no work is available, put the thread to sleep.
Definition: btr0mtib.cc:281
void add(Page_extent *page_extent, std::function< void()> &fn_wait_begin, std::function< void()> &fn_wait_end)
Add a page extent to the bulk flush queue.
Definition: btr0mtib.cc:174
static constexpr std::chrono::milliseconds s_sleep_duration
The sleep duration in milliseconds.
Definition: btr0mtib.h:762
The proper function call sequence of Page_load is as below: – Page_load::init – Page_load::insert – P...
Definition: btr0mtib.h:1357
dberr_t init_mem(const page_no_t new_page_no, Page_extent *page_extent) noexcept
Definition: btr0mtib.cc:996
dberr_t store_ext(const big_rec_t *big_rec, Rec_offsets offsets) noexcept
Store external record Since the record is not logged yet, so we don't log update to the record.
void set_level(size_t level) noexcept
Set the level of this page.
Definition: btr0mtib.h:1558
buf_block_t * get_block()
Definition: btr0mtib.h:1624
space_id_t space() const noexcept
Get the table space ID.
Definition: btr0mtib.h:1714
void rollback() noexcept
Rollback mtr for a page.
Definition: btr0mtib.cc:1416
dberr_t init_mem_blob(const page_no_t page_no, Page_extent *page_extent) noexcept
Initialize a memory block to be used for storing blobs.
Definition: btr0mtib.cc:960
bool is_corrupted() const
Check if page is corrupted.
Definition: btr0mtib.cc:326
trx_id_t m_trx_id
The transaction id.
Definition: btr0mtib.h:1647
byte * m_heap_top
The heap top in page for next insert.
Definition: btr0mtib.h:1668
size_t get_level() const noexcept
Get page level.
Definition: btr0mtib.h:1555
rec_t * m_last_slotted_rec
Last record assigned to a slot.
Definition: btr0mtib.h:1690
bool make_ext(dtuple_t *tuple)
Store externally the first possible field of the given tuple.
Definition: btr0mtib.cc:1540
dict_index_t * index()
Definition: btr0mtib.h:1622
void set_level_ctx(Level_ctx *level_ctx)
Set the level context object for this page load.
Definition: btr0mtib.h:1612
void set_trx_id(const trx_id_t trx_id)
Set the transaction id.
Definition: btr0mtib.h:1411
size_t copy_to(std::vector< Page_load * > &to_pages)
Distribute all records from this page to the given pages.
Definition: btr0mtib.cc:1454
size_t copy_records(const rec_t *first_rec) noexcept
Copy given and all following records.
Definition: btr0mtib.cc:1485
void set_flush_observer(Flush_observer *observer)
Set the flush observer.
Definition: btr0mtib.h:1419
void set_page_extent(Page_extent *page_extent)
Definition: btr0mtib.h:1626
size_t get_rec_no() const
Get record no.
Definition: btr0mtib.h:1561
trx_id_t get_trx_id() const
Get the current transaction identifier.
Definition: btr0mtib.h:1415
void set_min_rec_flag()
Set the REC_INFO_MIN_REC_FLAG on the first user record in this page.
Definition: btr0mtib.cc:2065
page_no_t get_page_no() const noexcept
Get the page number of this page load object.
Definition: btr0mtib.h:1533
void set_next(page_no_t next_page_no) noexcept
Set next page.
Definition: btr0mtib.cc:1504
size_t get_page_size() const noexcept
Get the physical page size of the underlying tablespace.
Definition: btr0mtib.h:1716
dict_index_t * m_index
The index B-tree.
Definition: btr0mtib.h:1641
size_t m_slotted_rec_no
Number of records assigned to slots.
Definition: btr0mtib.h:1693
dberr_t insert(const dtuple_t *tuple, const big_rec_t *big_rec, size_t rec_size) noexcept
Insert a tuple in the page.
Definition: btr0mtib.cc:1299
Flush_observer * m_flush_observer
Flush observer.
Definition: btr0mtib.h:1687
bool verify_space_id() const
Obtain tablespace id from the frame and the buffer block and ensure that they are the same.
Definition: btr0mtib.cc:3343
void free()
Free the memory block.
Definition: btr0mtib.cc:2113
uint64_t m_modify_clock
The modify clock value of the buffer block when the block is re-pinned.
Definition: btr0mtib.h:1684
void set_prev(page_no_t prev_page_no) noexcept
Set previous page.
Definition: btr0mtib.cc:1508
bool has_level_ctx() const
Check if this page load object contains a level context object.
Definition: btr0mtib.h:1617
Page_load(dict_index_t *index, Btree_load *btree_load)
Ctor.
Definition: btr0mtib.cc:928
size_t m_rec_no
User record no.
Definition: btr0mtib.h:1671
std::atomic_bool m_is_cached
true iff the Page load is cached.
Definition: btr0mtib.h:1705
static void drop(Page_load *page_load)
Release the page loader.
Definition: btr0mtib.cc:709
bool is_memory() const
Check if state of this page is BUF_BLOCK_MEMORY.
Definition: btr0mtib.h:1378
bool is_leaf() const
Definition: btr0mtib.h:1423
bool is_space_available(size_t rec_size) const noexcept
Check if required space is available in the page for the rec to be inserted.
Definition: btr0mtib.cc:1516
void reset() noexcept
Reset this object so that Page_load::init() can be called again on this object.
Definition: btr0mtib.cc:1134
size_t get_data_size() const
Definition: btr0mtib.h:1574
bool need_ext(const dtuple_t *tuple, size_t rec_size) const noexcept
Check whether the record needs to be stored externally.
Definition: btr0mtib.cc:1572
size_t m_level
The page level in B-tree.
Definition: btr0mtib.h:1662
rec_t * m_cur_rec
The current rec, just before the next insert rec.
Definition: btr0mtib.h:1656
dberr_t alloc() noexcept
Allocate a page for this Page_load object.
Definition: btr0mtib.cc:1087
void set_page_no(const page_no_t page_no)
Set the page number of this object.
Definition: btr0mtib.cc:919
size_t m_reserved_space
The reserved space for fill factor.
Definition: btr0mtib.h:1677
mem_heap_t * m_heap
Memory heap for internal allocation.
Definition: btr0mtib.h:1638
static Page_load * create(Btree_load *btree_load, Page_extent *page_extent)
A static member function to create this object.
Definition: btr0mtib.cc:699
page_id_t get_page_id() const noexcept
Definition: btr0mtib.h:1535
void latch() noexcept
Start mtr and latch block.
Page_extent * m_page_extent
Definition: btr0mtib.h:1702
bool is_cached() const
Definition: btr0mtib.h:1634
page_t * get_page() noexcept
Definition: btr0mtib.h:1568
bool is_index_locked() noexcept
Check if index is X locked.
Definition: btr0mtib.cc:1579
Page_load(dict_index_t *index, trx_id_t trx_id, page_no_t page_no, size_t level, Flush_observer *observer, Btree_load *btree_load=nullptr) noexcept
Constructor.
Definition: btr0mtib.h:1396
void set_top_seg(const fseg_header_t *hdr)
Definition: btr0mtib.h:1431
dberr_t reinit() noexcept
Re-initialize this page.
Definition: btr0mtib.cc:1066
~Page_load() noexcept
Destructor.
Definition: btr0mtib.cc:3327
void set_leaf_seg(const fseg_header_t *hdr)
Definition: btr0mtib.h:1428
bool is_min_rec_flag() const
Definition: btr0mtib.cc:2067
dberr_t commit() noexcept
Commit mtr for a page.
Definition: btr0mtib.cc:1386
size_t copy_all(const page_t *src_page) noexcept
Copy all records from page.
Definition: btr0mtib.cc:1445
size_t m_free_space
The free space left in the page.
Definition: btr0mtib.h:1674
void set_cached()
Mark the Page load as cached.
Definition: btr0mtib.h:1631
const bool m_is_comp
Flag: is page in compact format.
Definition: btr0mtib.h:1665
Btree_load * m_btree_load
Definition: btr0mtib.h:1698
page_no_t get_prev() noexcept
Get previous page (FIL_PAGE_PREV).
Definition: btr0mtib.cc:1512
mtr_t * m_mtr
The min-transaction.
Definition: btr0mtib.h:1644
const page_t * get_page() const noexcept
Get page.
Definition: btr0mtib.h:1564
bool m_modified
Page modified flag.
Definition: btr0mtib.h:1696
buf_block_t * m_block
The buffer block.
Definition: btr0mtib.h:1650
void print_child_page_nos() noexcept
Print the child page numbers.
Definition: btr0mtib.cc:1429
page_no_t m_page_no
The page no.
Definition: btr0mtib.h:1659
Level_ctx * m_level_ctx
Definition: btr0mtib.h:1700
dberr_t init() noexcept
Initialize members and allocate page if needed and start mtr.
Definition: btr0mtib.cc:1166
size_t m_total_data
Total data in the page.
Definition: btr0mtib.h:1680
void init_for_writing()
Definition: btr0mtib.cc:341
void finish() noexcept
Mark end of insertion to the page.
Definition: btr0mtib.cc:1331
page_t * m_page
The page.
Definition: btr0mtib.h:1653
dtuple_t * get_node_ptr() noexcept
Get node pointer.
Definition: btr0mtib.h:1710
Used to insert many blobs into InnoDB.
Definition: btr0mtib.h:788
Page_load * alloc_data_page()
Allocate a data page.
Definition: btr0mtib.cc:3919
Page_range_t m_page_range_first
Definition: btr0mtib.h:865
Page_load * alloc_first_page()
Allocate a LOB first page.
Definition: btr0mtib.cc:3899
Page_load * alloc_index_page()
Allocate a LOB index page.
Definition: btr0mtib.cc:3903
ut::Object_cache< Page_load > m_page_load_cache
Cache of Page_load objects.
Definition: btr0mtib.h:879
Page_extent * alloc_free_extent()
Definition: btr0mtib.cc:3858
Blob_inserter(Btree_load &btree_load)
Constructor.
Definition: btr0mtib.cc:3699
Page_load * alloc_page_from_extent(Page_extent *&m_page_extent)
Definition: btr0mtib.cc:3867
trx_id_t get_trx_id() const
Get the current transaction id.
Definition: btr0mtib.h:1809
~Blob_inserter()
Definition: btr0mtib.cc:3946
Page_extent * m_page_extent_first
Page extent from which to allocate first pages of blobs.
Definition: btr0mtib.h:863
dberr_t write_blob(Blob_context blob_ctx, lob::ref_t &ref, const byte *data, size_t len)
Write data into the blob.
Definition: btr0mtib.cc:3824
ut::Object_cache< Page_extent > m_page_extent_cache
Cache of Page_extent objects.
Definition: btr0mtib.h:882
dberr_t close_blob(Blob_context blob_ctx, lob::ref_t &ref)
Indicate that the blob has been completed, so that resources can be removed, and as necessary flushin...
Definition: btr0mtib.cc:3830
void finish()
Definition: btr0mtib.cc:3927
std::list< Page_extent * > m_index_extents
Page extent from which to allocate index pages of blobs.
Definition: btr0mtib.h:873
Blob_context m_blob
The current blob being inserted.
Definition: btr0mtib.h:876
dberr_t insert_blob(lob::ref_t &ref, const dfield_t *dfield)
Definition: btr0mtib.h:802
dberr_t open_blob(Blob_context &blob_ctx, lob::ref_t &ref)
Create a blob.
Definition: btr0mtib.cc:3819
dberr_t init()
Initialize by allocating necessary resources.
Definition: btr0mtib.cc:3706
Btree_load & m_btree_load
Definition: btr0mtib.h:859
ut::unique_ptr< Blob_handle > m_blob_handle
Only one blob handle per sub-tree.
Definition: btr0mtib.h:885
Page_extent * m_page_extent_data
Page extent from which to allocate data pages of blobs.
Definition: btr0mtib.h:869
We use Flush_observer to track flushing of non-redo logged pages in bulk create index(btr0load....
Definition: buf0flu.h:274
The proper function call sequence of Page_load is as below: – Page_load::init – Page_load::insert – P...
Definition: btr0load.cc:54
A helper RAII wrapper for otherwise difficult to use sequence of:
Definition: rem0rec.h:292
page_id_t id
Page id.
Definition: buf0buf.h:1376
Page identifier.
Definition: buf0types.h:207
Page size descriptor.
Definition: page0size.h:50
size_t physical() const
Retrieve the physical page size (on-disk).
Definition: page0size.h:121
A utility class which, if inherited from, prevents the descendant class from being copied,...
Definition: ut0class_life_cycle.h:41
A class to manage objects of type T.
Definition: ut0object_cache.h:40
const char * p
Definition: ctype-mb.cc:1225
dberr_t
Definition: db0err.h:39
@ DB_SUCCESS
Definition: db0err.h:43
DDL key comparison.
Data dictionary system.
static bool dict_table_is_comp(const dict_table_t *table)
Check whether the table uses the compact page format.
static ulint dict_index_is_spatial(const dict_index_t *index)
Check whether the index is a Spatial Index.
constexpr page_no_t FIL_NULL
'null' (undefined) page offset in the context of file spaces
Definition: fil0fil.h:1156
#define FSP_EXTENT_SIZE
File space extent size in pages page size | file space extent size -------—+--------------------— 4 K...
Definition: fsp0types.h:64
constexpr uint32_t FSEG_HEADER_SIZE
Length of the file system header, in bytes.
Definition: fsp0types.h:94
byte fseg_header_t
Data type for file segment header.
Definition: fsp0types.h:85
#define free(A)
Definition: lexyy.cc:915
For bulk loading large objects.
Implements the large objects (LOB) module.
Definition: btr0mtib.cc:56
void * Blob_context
Definition: btr0mtib.h:60
void bulk_load_enable_slow_io_debug()
Definition: btr0mtib.cc:60
void bulk_load_disable_slow_io_debug()
Definition: btr0mtib.cc:61
std::ostream & operator<<(std::ostream &out, const Page_extent &obj)
Definition: btr0mtib.h:321
static PFS_engine_table_share_proxy table
Definition: pfs.cc:61
Used for bulk load of data.
Definition: fut0lst.cc:411
PT & ref(PT *tp)
Definition: tablespace_impl.cc:359
static Value err()
Create a Value object that represents an error condition.
Definition: json_binary.cc:908
const char * begin(const char *const c)
Definition: base64.h:44
size_t size(const char *const c)
Definition: base64.h:46
Cursor end()
A past-the-end Cursor.
Definition: rules_table_service.cc:192
Definition: gcs_xcom_synode.h:64
std::vector< T, ut::allocator< T > > vector
Specialization of vector which uses allocator.
Definition: ut0new.h:2876
void delete_(T *ptr) noexcept
Releases storage which has been dynamically allocated through any of the ut::new*() variants.
Definition: ut0new.h:811
std::conditional_t< !std::is_array< T >::value, std::unique_ptr< T, detail::Deleter< T > >, std::conditional_t< detail::is_unbounded_array_v< T >, std::unique_ptr< T, detail::Array_deleter< std::remove_extent_t< T > > >, void > > unique_ptr
The following is a common type that is returned by all the ut::make_unique (non-aligned) specializati...
Definition: ut0new.h:2440
The page cursor.
static ulint page_get_free_space_of_empty(bool comp)
Calculates free space if a page is emptied.
static ulint page_get_data_size(const page_t *page)
Returns the sum of the sizes of the records in the record list excluding the infimum and supremum rec...
constexpr uint32_t PAGE_HEADER
index page header starts at this offset
Definition: page0types.h:53
constexpr uint32_t PAGE_BTR_SEG_LEAF
file segment header for the leaf pages in a B-tree: defined only on the root page of a B-tree,...
Definition: page0types.h:90
constexpr uint32_t PAGE_BTR_SEG_TOP
Definition: page0types.h:98
byte page_t
Type of the index page.
Definition: page0types.h:152
byte rec_t
Definition: rem0types.h:41
Function object to compare two Btree_load objects.
Definition: btr0mtib.h:1791
dict_index_t * m_index
Definition: btr0mtib.h:1794
bool operator()(const Btree_load *l_btree, const Btree_load *r_btree)
Definition: btr0mtib.cc:2132
Btree_load_compare(dict_index_t *index)
Definition: btr0mtib.h:1792
bool is_empty() const
Definition: btr0mtib.h:531
size_t m_max_range
Maximum number of ranges to pre-allocate.
Definition: btr0mtib.h:559
void init(size_t max_range)
Initialize cache.
Definition: btr0mtib.cc:2197
bool check(size_t &num_alloc, size_t &num_free) const
Check for number of extents to be allocated and cached.
Definition: btr0mtib.cc:2461
std::array< Page_range_t, S_MAX_RANGES > m_ranges
Cached page ranges already allocated to the segment.
Definition: btr0mtib.h:556
std::atomic< size_t > m_num_consumed
Total number of ranges consumed.
Definition: btr0mtib.h:565
void set_range(size_t index, Page_range_t &range)
Set allocated range(extent) in cache.
Definition: btr0mtib.cc:2452
std::atomic< size_t > m_num_allocated
Total number of ranges allocated.
Definition: btr0mtib.h:562
bool is_full() const
Definition: btr0mtib.h:534
bool get_range(Page_range_t &range, bool &alloc_trigger)
Get one page range from the cache.
Definition: btr0mtib.cc:2432
Context information for each level.
Definition: btr0mtib.h:358
size_t m_stat_n_extents
Number of extents allocated at this level.
Definition: btr0mtib.h:462
buf_block_t * alloc(const page_no_t new_page_no) noexcept
Allocate private memory buffer (BUF_BLOCK_MEMORY) block for given page number.
Definition: btr0mtib.cc:889
dict_index_t * m_index
The index which is being built.
Definition: btr0mtib.h:447
void build_page_cache()
Build page loader cache for current extent.
Definition: btr0mtib.cc:802
bool load_extent_from_cache()
Load one extent from extent cache.
Definition: btr0mtib.cc:749
dberr_t init()
Initialize.
Definition: btr0mtib.cc:842
Btree_load * m_btree_load
A back pointer to conceptually higher level btree load object.
Definition: btr0mtib.h:456
void set_current_page_load(Page_load *sibling)
Definition: btr0mtib.h:475
page_no_t m_last_page
The page_no of the last page in this level.
Definition: btr0mtib.h:444
~Level_ctx()
Destructor.
Definition: btr0mtib.cc:3325
const size_t m_level
The B-tree level whose context information is stored in this obj.
Definition: btr0mtib.h:450
void build_extent_cache()
Build the extent cache.
Definition: btr0mtib.cc:816
void free_page_load()
Free the current page load.
Definition: btr0mtib.cc:730
Page_load * create_page_load()
Definition: btr0mtib.cc:716
std::vector< page_no_t > m_pages_allocated
Definition: btr0mtib.h:469
trx_id_t get_trx_id() const
Definition: btr0mtib.h:1750
static void destroy(Level_ctx *obj)
Static member function to destroy a Level_ctx object.
Definition: btr0mtib.cc:685
std::vector< Page_extent * > m_cached_extents
Pre allocated extents to prevent repeated allocation and free.
Definition: btr0mtib.h:438
dberr_t alloc_page_num(page_no_t &page_no)
Allocate a page number.
Definition: btr0mtib.cc:618
Level_ctx(dict_index_t *index, size_t level, Btree_load *btree_load)
Constructor.
Definition: btr0mtib.h:377
bool m_extent_full
True if the current extent is full.
Definition: btr0mtib.h:465
dberr_t alloc_extent()
Allocate one extent in the relevant file segment.
Definition: btr0mtib.cc:655
size_t m_stat_n_pages
Number of pages allocated at this level.
Definition: btr0mtib.h:459
bool is_page_tracked(const page_no_t &page_no) const
Definition: btr0mtib.cc:649
Page_load * m_page_load
The Page_load of the current page being loaded.
Definition: btr0mtib.h:453
Page_load * get_page_load_from_cache()
Get a free page loader from cache.
Definition: btr0mtib.cc:784
Page_extent * m_page_extent
The current extent that is being loaded.
Definition: btr0mtib.h:421
page_no_t m_first_page
The page_no of the first page in this level.
Definition: btr0mtib.h:441
Page_load * get_page_load() const
Definition: btr0mtib.h:473
bool is_leaf() const
Check if this is leaf level.
Definition: btr0mtib.h:392
static Level_ctx * create(dict_index_t *index, size_t level, Btree_load *btree_load)
Static member function construct a Level_ctx object.
Definition: btr0mtib.cc:677
Allocate, use, manage and flush one extent pages (FSP_EXTENT_SIZE).
Definition: btr0mtib.h:67
Page_extent(Btree_load *btree_load, const bool is_leaf)
Constructor.
Definition: btr0mtib.h:1758
static void drop(Page_extent *extent)
Release the page extent.
Definition: btr0mtib.h:1777
bool is_blob() const
Check if this is a blob extent.
Definition: btr0mtib.h:231
void set_cached()
Mark the extent as cached.
Definition: btr0mtib.h:208
size_t m_next_cached_page_load_index
Next cached page load index.
Definition: btr0mtib.h:254
void get_page_numbers(std::vector< page_no_t > &page_numbers) const
Definition: btr0mtib.h:262
std::atomic_bool m_is_free
true if the cached entry is free to be used.
Definition: btr0mtib.h:250
std::vector< Page_load * > m_page_loads
All the page loaders of the used pages.
Definition: btr0mtib.h:87
bool is_null() const
Definition: btr0mtib.h:121
bool is_valid() const
Check if the range is valid.
Definition: btr0mtib.h:302
bool m_is_blob
True if this extent is used for blobs.
Definition: btr0mtib.h:257
dberr_t flush(fil_node_t *node, void *iov, size_t iov_size)
Flush the used pages to disk.
Definition: btr0mtib.cc:537
bool is_any_used() const
Check if there are any pages used.
Definition: btr0mtib.h:153
page_no_t alloc()
Allocate a page number.
Definition: btr0mtib.h:334
void init()
Initialize the next page number to be allocated.
Definition: btr0mtib.h:344
std::ostream & print(std::ostream &out) const
Definition: btr0mtib.h:313
dberr_t destroy()
Free all resources.
Definition: btr0mtib.cc:608
std::pair< page_no_t, page_no_t > Page_range_t
Definition: btr0mtib.h:68
std::vector< Page_load * > m_cached_page_loads
Cached page loads.
Definition: btr0mtib.h:252
size_t used_pages() const
Calculate the number of used pages.
Definition: btr0mtib.h:109
void set_page_load(page_no_t page_no, Page_load *page_load)
Member of Page_extent.
Definition: btr0mtib.h:269
page_no_t page_count() const
Number of pages in this extent.
Definition: btr0mtib.h:353
dberr_t flush_one_by_one(fil_node_t *node)
Flush one page at a time.
Definition: btr0mtib.cc:421
dberr_t bulk_flush(fil_node_t *node, void *iov, size_t iov_size)
Flush 1 extent pages at a time.
Definition: btr0mtib.cc:522
page_no_t m_page_no
Next page number to be used.
Definition: btr0mtib.h:80
void set_blob()
Mark that this extent is used for blobs.
Definition: btr0mtib.h:227
void reset_range(const Page_range_t &range)
Reset the range with the given value.
Definition: btr0mtib.h:325
std::atomic_bool m_is_owned_by_bulk_flusher
True if this extent has been handed over to the bulk flusher.
Definition: btr0mtib.h:238
Page_range_t m_range
Page numbers of the pages that has been allocated in this extent.
Definition: btr0mtib.h:84
bool is_fully_used() const
Check if no more pages are there to be used.
Definition: btr0mtib.h:148
bool is_free() const
Definition: btr0mtib.h:215
void destroy_cached()
Free any cached page load entries.
Definition: btr0mtib.cc:600
void append(Page_load *page_load)
Save a page_load.
Definition: btr0mtib.h:1735
Page_range_t pages_to_free() const
size_t last() const
Get the index of the first unused page load.
Definition: btr0mtib.h:115
void set_state(bool free)
Set and unset free state of a cached extent.
Definition: btr0mtib.h:212
std::atomic_bool m_is_cached
true iff the extent is cached.
Definition: btr0mtib.h:248
void free_memory_blocks()
Free the BUF_BLOCK_MEMORY blocks used by this extent.
Definition: btr0mtib.h:1802
bool is_btree_load_nullptr() const
Definition: btr0mtib.h:89
bool is_cached() const
Definition: btr0mtib.h:218
void reset_cached_page_loads()
Reset page load cache to free all.
Definition: btr0mtib.h:221
space_id_t space() const
Definition: btr0mtib.h:1754
static Page_extent * create(Btree_load *btree_load, const bool is_leaf, const bool is_blob)
Create an object of type Page_extent in the heap.
Definition: btr0mtib.h:1766
Page_load * get_page_load(page_no_t page_no)
Member of Page_extent.
Definition: btr0mtib.h:284
~Page_extent()
Destructor.
Definition: btr0mtib.h:294
Btree_load * m_btree_load
Definition: btr0mtib.h:242
bool m_is_leaf
true if this extent belongs to leaf segment.
Definition: btr0mtib.h:245
Information about a buffer page.
Definition: btr0mtib.h:1727
size_t m_n_recs
Number of user records in the page.
Definition: btr0mtib.h:1729
size_t m_data_size
Number of bytes of data.
Definition: btr0mtib.h:1732
Heap wrapper that destroys the heap instance when it goes out of scope.
Definition: mem0mem.h:439
Storage format for overflow data in a big record, that is, a clustered index record which needs exter...
Definition: data0data.h:852
The buffer control block structure.
Definition: buf0buf.h:1747
buf_page_t page
page information; this must be the first field, so that buf_pool->page_hash can point to buf_page_t o...
Definition: buf0buf.h:1753
bool is_memory() const noexcept
Definition: buf0buf.h:1993
Compare the keys of an index.
Definition: ddl0impl-compare.h:41
Structure for an SQL data field.
Definition: data0data.h:617
unsigned len
data length; UNIV_SQL_NULL if SQL null
Definition: data0data.h:623
void * data
pointer to data
Definition: data0data.h:618
Data structure for an index.
Definition: dict0mem.h:1041
unsigned space
space where the index tree is placed
Definition: dict0mem.h:1058
dict_table_t * table
back pointer to table
Definition: dict0mem.h:1055
page_size_t get_page_size() const
Get the page size of the tablespace to which this index belongs.
Definition: dict0mem.cc:895
Data structure for a database table.
Definition: dict0mem.h:1904
table_name_t name
Table name.
Definition: dict0mem.h:1979
Structure for an SQL data tuple of fields (logical record)
Definition: data0data.h:696
File node of a tablespace or the log data space.
Definition: fil0fil.h:155
Tablespace or log data space.
Definition: fil0fil.h:235
The struct 'lob::ref_t' represents an external field reference.
Definition: lob0lob.h:198
The info structure stored at the beginning of a heap block.
Definition: mem0mem.h:302
Mini-transaction handle and buffer.
Definition: mtr0mtr.h:177
Definition: gen_lex_token.cc:149
char * m_name
The name in internal representation.
Definition: dict0mem.h:467
Definition: trx0trx.h:675
ib_id_t trx_id_t
Transaction identifier (DB_TRX_ID, DATA_TRX_ID)
Definition: trx0types.h:138
#define IF_DEBUG(...)
Definition: univ.i:674
unsigned long int ulint
Definition: univ.i:406
Utilities related to class lifecycle.
#define UT_LOCATION_HERE
Definition: ut0core.h:73
#define ut_ad(EXPR)
Debug assertion.
Definition: ut0dbg.h:105
#define ut_a(EXPR)
Abort execution if EXPR does not evaluate to nonzero.
Definition: ut0dbg.h:93
Dynamic memory allocation routines and custom allocators specifically crafted to support memory instr...
#define UT_NEW_THIS_FILE_PSI_KEY
Definition: ut0new.h:566
Manage a cache of objects.