MySQL 9.3.0
Source Code Documentation
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages Concepts
btr0mtib.h
Go to the documentation of this file.
1/*****************************************************************************
2
3Copyright (c) 2023, 2025, Oracle and/or its affiliates.
4
5This program is free software; you can redistribute it and/or modify it under
6the terms of the GNU General Public License, version 2.0, as published by the
7Free Software Foundation.
8
9This program is designed to work with certain software (including
10but not limited to OpenSSL) that is licensed under separate terms,
11as designated in a particular file or component or in included license
12documentation. The authors of MySQL hereby grant you an additional
13permission to link the program and your derivative works with the
14separately licensed software that they have either included with
15the program or referenced in the documentation.
16
17This program is distributed in the hope that it will be useful, but WITHOUT
18ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
19FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
20for more details.
21
22You should have received a copy of the GNU General Public License along with
23this program; if not, write to the Free Software Foundation, Inc.,
2451 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25
26*****************************************************************************/
27
28/** @file include/btr0mtib.h
29
30 Multi Threaded Index Build (MTIB) using BUF_BLOCK_MEMORY and dedicated
31 Bulk_flusher threads.
32
33 Created 09/Feb/2023 Annamalai Gurusami
34 *************************************************************************/
35
36#ifndef btr0mtib_h
37#define btr0mtib_h
38
39#include <stddef.h>
40#include <vector>
41
42#include "btr0load.h"
43#include "ddl0impl-compare.h"
44#include "dict0dict.h"
45#include "lob0bulk.h"
46#include "lob0lob.h"
47#include "page0cur.h"
48#include "ut0class_life_cycle.h"
49#include "ut0new.h"
50#include "ut0object_cache.h"
51
52/* The Btree_multi namespace is used for multi-threaded parallel index build. */
53namespace Btree_multi {
54
55// Forward declaration.
56class Page_load;
57class Btree_load;
58struct Page_stat;
59
60using Blob_context = void *;
61
62namespace bulk {
63class Blob_inserter;
64} // namespace bulk
65
66/** Allocate, use, manage and flush one extent pages (FSP_EXTENT_SIZE). */
68 using Page_range_t = std::pair<page_no_t, page_no_t>;
69
70 /** Constructor.
71 @param[in] btree_load B-tree loader object.
72 @param[in] is_leaf true if this is part of leaf segment, false if this is
73 part of non-leaf (or top) segment. */
74 Page_extent(Btree_load *btree_load, const bool is_leaf);
75
76 /** Destructor. */
78
79 /** Next page number to be used. */
81
82 /** Page numbers of the pages that has been allocated in this extent.
83 The page range is [p1, p2), where p2 is not included. */
85
86 /** All the page loaders of the used pages. */
87 std::vector<Page_load *> m_page_loads;
88
89 bool is_btree_load_nullptr() const { return m_btree_load == nullptr; }
90
91 public:
92 /** Create an object of type Page_extent in the heap. */
93 static Page_extent *create(Btree_load *btree_load, const bool is_leaf,
94 const bool is_blob);
95
96 /** Release the page extent. Delete if not cached.
97 @param[in] extent extent to release */
98 static void drop(Page_extent *extent);
99
100 /** Number of pages in this extent. */
101 page_no_t page_count() const;
102
103 /** Reset the range with the given value.
104 @param[in] range new range value to be used. */
105 void reset_range(const Page_range_t &range);
106
107 /** Calculate the number of used pages.
108 return the number of used pages. */
109 size_t used_pages() const { return m_page_no - m_range.first; }
110
111 void get_page_numbers(std::vector<page_no_t> &page_numbers) const;
112
113 /** Get the index of the first unused page load.
114 @return index of the first unused page load. */
115 size_t last() const { return m_page_no - m_range.first; }
116
117 /** Check if the range is valid.
118 @return true if the range is valid, false otherwise. */
119 bool is_valid() const;
120
121 bool is_null() const {
122 return (m_range.first == FIL_NULL) && (m_range.second == FIL_NULL);
123 }
124
125 public:
126 /** Member of Page_extent. The index of page_load objects in the m_page_loads
127 corresponds to the page_no in the m_range. Here, check if a page_no already
128 has a Page_load object.
129 @param[in] page_no page_no for which we are looking for Page_load obj.
130 @return Page_load object if available, nullptr otherwise. */
132
133 /** Member of Page_extent. Associate the given page_no and the page load
134 object.
135 @param[in] page_no page number to associate.
136 @param[in] page_load page load object to associate. */
137 void set_page_load(page_no_t page_no, Page_load *page_load);
138
140
141 /** Initialize the next page number to be allocated. The page range should
142 have been already initialized. */
143 void init();
144
145 /** Check if no more pages are there to be used.
146 @return true if the page extent is completely used.
147 @return false if the page extent has more pages to be used. */
148 bool is_fully_used() const { return m_page_no == m_range.second; }
149
150 /** Check if there are any pages used.
151 @return true if at least one page is used.
152 @return false if no pages are used in this extent.*/
153 bool is_any_used() const {
154 ut_ad(m_page_no == m_range.first || m_page_loads.size() > 0);
155 return m_page_no > m_range.first;
156 }
157
158 public:
159 /** Allocate a page number. */
161
162 /** Save a page_load. */
163 void append(Page_load *page_load);
164
165 /** Flush the used pages to disk. It also frees the unused pages back to the
166 segment.
167 @param[in,out] node space file node
168 @param[in,out] iov vector IO array
169 @param[in] iov_size vector IO array size
170 @return On success, return DB_SUCCESS. */
171 dberr_t flush(fil_node_t *node, void *iov, size_t iov_size);
172
173 /** Flush one page at a time. This can be used when scatter/gather i/o is
174 not available for use.
175 @param[in,out] node space file node
176 @return On success, return DB_SUCCESS. */
178
179 /** Flush 1 extent pages at a time. Internally it will call OS dependent
180 API (either bulk_flush_win() on Windows or bulk_flush_linux() on other
181 operating systems.
182 @param[in,out] node space file node
183 @param[in,out] iov vector IO array
184 @param[in] iov_size vector IO array size
185 @return DB_SUCCESS on success, error code on failure. */
186 dberr_t bulk_flush(fil_node_t *node, void *iov [[maybe_unused]],
187 size_t iov_size [[maybe_unused]]);
188
189#ifdef UNIV_LINUX
190 /** Flush 1 extent pages at a time. Uses pwritev() i/o API.
191 @param[in,out] node space file node
192 @param[in,out] iov vector IO array
193 @param[in] iov_size vector IO array size
194 @return DB_SUCCESS on success, error code on failure. */
195 dberr_t bulk_flush_linux(fil_node_t *node, struct iovec *iov,
196 size_t iov_size);
197#endif /* UNIV_LINUX */
198
199 /** Free all resources. */
201
202 /** Free any cached page load entries. */
203 void destroy_cached();
204
205 space_id_t space() const;
206
207 /** Mark the extent as cached. Flush thread should not free this extent. */
208 void set_cached() { m_is_cached.store(true); }
209
210 /** Set and unset free state of a cached extent.
211 @param[in] free state to be set */
212 void set_state(bool free) { m_is_free.store(free); }
213
214 /** @return true iff the cached element is in free state. */
215 bool is_free() const { return m_is_free.load(); }
216
217 /** @return true iff it is a cached extent. */
218 bool is_cached() const { return m_is_cached.load(); }
219
220 /** Reset page load cache to free all. */
222
223 public:
224 std::ostream &print(std::ostream &out) const;
225
226 /** Mark that this extent is used for blobs. */
227 void set_blob() { m_is_blob = true; }
228
229 /** Check if this is a blob extent.
230 @return true if it is a blob extent. */
231 bool is_blob() const { return m_is_blob; }
232
233 /** Free the BUF_BLOCK_MEMORY blocks used by this extent. */
234 void free_memory_blocks();
235
236#ifdef UNIV_DEBUG
237 /** True if this extent has been handed over to the bulk flusher. */
238 std::atomic_bool m_is_owned_by_bulk_flusher{false};
239#endif /* UNIV_DEBUG */
240
241 private:
243
244 /** true if this extent belongs to leaf segment. */
245 bool m_is_leaf{true};
246
247 /** true iff the the extent is cached. */
248 std::atomic_bool m_is_cached{false};
249 /** true if the cached entry is free to be used. */
250 std::atomic_bool m_is_free{true};
251 /** Cached page loads. */
252 std::vector<Page_load *> m_cached_page_loads;
253 /** Next cached page load index. */
255
256 /** True if this extent is used for blobs. */
257 bool m_is_blob{false};
258
259 friend struct Level_ctx;
260};
261
263 std::vector<page_no_t> &page_numbers) const {
264 for (page_no_t i = m_range.first; i < m_page_no; ++i) {
265 page_numbers.push_back(i);
266 }
267}
268
270 Page_load *page_load) {
271 ut_ad(page_no >= m_range.first);
272 ut_ad(page_no < m_range.second);
273 const size_t idx = page_no - m_range.first;
274 if (idx == m_page_loads.size()) {
275 m_page_loads.push_back(page_load);
276 } else {
277 ut_ad(idx <= m_page_loads.size());
278 ut_ad(m_page_loads[idx] == nullptr);
279 m_page_loads[idx] = page_load;
280 }
281 ut_ad(m_page_loads.size() > 0);
282}
283
285 ut_ad(page_no >= m_range.first);
286 ut_ad(page_no < m_range.second);
287 const size_t idx = page_no - m_range.first;
288 if (m_page_loads.empty() || m_page_loads.size() <= idx) {
289 return nullptr;
290 }
291 return m_page_loads[idx];
292}
293
297 m_range.first = FIL_NULL;
298 m_range.second = FIL_NULL;
299 m_btree_load = nullptr;
300}
301
302inline bool Page_extent::is_valid() const {
303 ut_ad(m_range.first != 0);
304 ut_ad(m_range.second != 0);
305 if (is_null()) {
306 return true;
307 }
308 ut_ad(m_range.first < m_range.second);
309 ut_ad((m_range.second - m_range.first) <= FSP_EXTENT_SIZE);
310 return m_range.first < m_range.second;
311}
312
313inline std::ostream &Page_extent::print(std::ostream &out) const {
314 out << "[Page_extent: this=" << (void *)this
315 << ", m_range.first=" << m_range.first
316 << ", m_range.second=" << m_range.second
317 << ", page_loads=" << m_page_loads.size() << "]" << std::endl;
318 return out;
319}
320
321inline std::ostream &operator<<(std::ostream &out, const Page_extent &obj) {
322 return obj.print(out);
323}
324
326 ut_ad(range.first != 0);
327 ut_ad(range.second != 0);
328 ut_ad(range.first != FIL_NULL);
329 ut_ad(range.second != FIL_NULL);
330 m_range = range;
331 m_page_no = m_range.first;
332}
333
335 ut_ad(is_valid());
337
338 if (m_page_no == m_range.second) {
339 return FIL_NULL;
340 }
341 return m_page_no++;
342}
343
/** Initialize the next page number to be allocated. The page range must
have been initialized to a real (non-sentinel) extent before this call. */
inline void Page_extent::init() {
  /* Neither end of the range may be page 0 or the FIL_NULL sentinel. */
  ut_ad(m_range.first != 0);
  ut_ad(m_range.second != 0);
  ut_ad(m_range.first != FIL_NULL);
  ut_ad(m_range.second != FIL_NULL);
  /* Page allocation starts at the first page of the range. */
  m_page_no = m_range.first;
  /* Reserve one Page_load slot per page to avoid vector reallocation
  while the extent is being filled. */
  m_page_loads.reserve(page_count());
}
352
354 return m_range.second - m_range.first;
355}
356
357/** Context information for each level. */
358struct Level_ctx {
359 /** Static member function construct a Level_ctx object.
360 @param[in] index dictionary index object.
361 @param[in] level the B-tree level of this context object.
362 @param[in] btree_load a back pointer to the Btree_load object to which this
363 Level_ctx object is a part of.
364 @return level context object on success, nullptr on error. */
365 static Level_ctx *create(dict_index_t *index, size_t level,
366 Btree_load *btree_load);
367
368 /** Static member function to destroy a Level_ctx object.
369 @param[in] obj the Level_ctx object to destroy. */
370 static void destroy(Level_ctx *obj);
371
372 /** Constructor
373 @param[in] index dictionary index object.
374 @param[in] level the B-tree level of this context object.
375 @param[in] btree_load a back pointer to the Btree_load object to which this
376 Level_ctx object is a part of.*/
377 Level_ctx(dict_index_t *index, size_t level, Btree_load *btree_load)
378 : m_index(index),
379 m_level(level),
381 m_btree_load(btree_load) {}
382
383 /** Destructor. */
384 ~Level_ctx();
385
386 /** Initialize.
387 @return DB_SUCCESS on success, an error code on failure. */
388 dberr_t init();
389
390 /** Check if this is leaf level.
391 @return true if this is leaf level, false otherwise. */
392 bool is_leaf() const { return m_level == 0; }
393
395
396 /** Free the current page load. */
397 void free_page_load();
398
399 /** Allocate a page number. Subsequently a Page_load will be created with the
400 allocated page number.
401 @param[out] page_no page number that was allocated.
402 @return DB_SUCCESS on success, error code on failure.*/
404
405 /** Allocate one extent in the relevant file segment. No associated buffer
406 blocks are allocated.
407 @return DB_SUCCESS on success, error code on failure.*/
409
410 /** Allocate private memory buffer (BUF_BLOCK_MEMORY) block for given page
411 number. */
412 [[nodiscard]] buf_block_t *alloc(const page_no_t new_page_no) noexcept;
413
414 void set_current_page_load(Page_load *sibling);
415
416 Page_load *get_page_load() const;
417
418 trx_id_t get_trx_id() const;
419
420 /** The current extent that is being loaded. */
422
423 /** Build the extent cache. */
424 void build_extent_cache();
425
426 /** Load one extent from extent cache.
427 @return true iff successful. */
429
430 /** Build page loader cache for current exent. */
431 void build_page_cache();
432
433 /** Get a free page loader from cache
434 @return page loader or nullptr if not found. */
436
437 /** Pre allocated extents to prevent repeated allocation and free. */
438 std::vector<Page_extent *> m_cached_extents;
439
440 /** The page_no of the first page in this level. */
442
443 /** The page_no of the last page in this level. */
445
446 /** The index which is being built. */
448
449 /** The B-tree level whose context information is stored in this obj. */
450 const size_t m_level{};
451
452 /** The Page_load of the current page being loaded. */
454
455 /** A back pointer to conceptually higher level btree load object. */
457
458 /** Number of pages allocated at this level. */
460
461 /** Number of extents allocated at this level. */
463
464 /** True if the current extent is full. */
465 bool m_extent_full{true};
466
467#ifdef UNIV_DEBUG
468 bool is_page_tracked(const page_no_t &page_no) const;
469 std::vector<page_no_t> m_pages_allocated;
470#endif /* UNIV_DEBUG */
471};
472
474
476 m_page_load = sibling;
477}
478
480 public:
481 enum class Type {
482 /** Allocate by Page */
483 PAGE,
484 /** Allocate by extent. */
485 EXTENT
486 };
487
488 /** Destructor to ensure thread stop. */
490
491 /** Check size and set extent allocator size parameters
492 @param[in] table InnoDB dictionary table object
493 @param[in] index InnoDB index being built.
494 @param[in] trx transaction performing bulk load
495 @param[in] size total data size to be loaded
496 @param[in] num_threads number of concurrent threads
497 @param[in] in_pages if true, allocate in pages
498 @return tablespace extend size in bytes. */
499 uint64_t init(dict_table_t *table, dict_index_t *index, trx_t *trx,
500 size_t size, size_t num_threads, bool in_pages);
501
502 /* Start extent allocator thread. */
503 void start();
504
505 /** Stop extent allocator thread, if active. */
506 void stop();
507
508 /** Allocate a page range - currently ans Extent.
509 @param[in] is_leaf true if leaf segment, otherwise non-leaf segment
510 @param[in] alloc_page if true, allocate in pages otherwise allocate extent
511 @param[out] range page range
512 @param[in,out] fn_wait_begin begin callback if wait is needed
513 @param[in,out] fn_wait_end end callback if wait is needed
514 @return Innodb error code. */
515 dberr_t allocate(bool is_leaf, bool alloc_page, Page_range_t &range,
516 std::function<void()> &fn_wait_begin,
517 std::function<void()> &fn_wait_end);
518
519 private:
520 /** Upper bound for max ranges. */
521 static constexpr size_t S_MAX_RANGES = 2 * 1024;
522
523 /** Maximum size by which the tablespace is extended each time. */
524 static constexpr size_t S_BULK_EXTEND_SIZE_MAX = 64;
525
527 /** Initialize cache.
528 @param[in] max_range maximum number of extents to cache. */
529 void init(size_t max_range);
530
531 /** @return true if no available extent to consume. */
532 inline bool is_empty() const { return (m_num_allocated == m_num_consumed); }
533
534 /** @return true if cache is full and no more extents can be added. */
535 inline bool is_full() const {
537 }
538
539 /** Check for number of extents to be allocated and cached.
540 @param[out] num_alloc number of extents to allocate
541 @param[out] num_free number of free extents
542 @return true if succesful. */
543 bool check(size_t &num_alloc, size_t &num_free) const;
544
545 /** Get one page range from the cache.
546 @param[out] range the allocated page range
547 @param[out] alloc_trigger true, if need to trigger allocator
548 @return true if extent is successfully returned from cache. */
549 bool get_range(Page_range_t &range, bool &alloc_trigger);
550
551 /** Set allocated range(extent) in cache.
552 @param[in] index position of the range
553 @param[in] range page range to be set */
554 void set_range(size_t index, Page_range_t &range);
555
556 /** Cached page ranges already allocated to the segment. */
557 std::array<Page_range_t, S_MAX_RANGES> m_ranges;
558
559 /** Maximum number of ranges to pre-allocate. */
561
562 /** Total number of ranges allocated. */
563 std::atomic<size_t> m_num_allocated{0};
564
565 /** Total number of ranges allocated. */
566 std::atomic<size_t> m_num_consumed{0};
567 };
568
569 /** Extent thread executor.
570 @return innodb error code. */
571 dberr_t run();
572
573 /** Allocate extents and fill the cache.
574 @param[in] is_leaf true if leaf segment, otherwise non-leaf segment
575 @param[in] num_extents number of extents to allocate
576 @return innodb error code. */
577 dberr_t allocate_extents(bool is_leaf, size_t num_extents);
578
579 /** Allocator wait function. */
580 void allocator_wait() const;
581
582 /** Check if leaf and non-leaf extent cache needs to be filled.
583 @param[out] n_leaf number of leaf extents to allocate
584 @param[out] n_non_leaf number of non-leaf extents to allocate
585 @param[out] trigger true if consumers should be triggered
586 @return true if allocator should stop. */
587 bool check(size_t &n_leaf, size_t &n_non_leaf, bool &trigger);
588
589 /** Allocate one extent.
590 @param[in] is_leaf true if leaf segment, otherwise non-leaf segment
591 @param[in,out] mtr mini tranaction to be used for allocation
592 @param[out] range page rannge for the extent
593 @return innodb error code. */
594 dberr_t allocate_extent(bool is_leaf, mtr_t &mtr, Page_range_t &range);
595
596 /** Allocate one page.
597 @param[in] is_leaf true if leaf segment, otherwise non-leaf segment
598 @param[out] range page rannge for the page
599 @return innodb error code. */
600 dberr_t allocate_page(bool is_leaf, Page_range_t &range);
601
602 /** @return true if operation is interrupted. */
603 bool is_interrupted();
604
605 private:
606 /** Bulk extent allocator. */
607 std::thread m_thread;
608
609 /** Number of times consumer(s) had to wait. */
610 mutable size_t m_consumer_wait_count{};
611
612 /** Number of times allocator had to wait. */
613 mutable size_t m_allocator_wait_count{};
614
615 /** Total consumer wait time in micro seconds. */
616 mutable std::chrono::microseconds m_consumer_wait_time;
617
618 /** Total allocator wait time in micro seconds. */
619 mutable std::chrono::microseconds m_allocator_wait_time;
620
621 /** Page range type. */
623
624 /** Cached leaf extents. */
626
627 /** Cached non-leaf extents. */
629
630 /** This mutex protects the m_queue. */
631 mutable std::mutex m_mutex;
632
633 /** Condition variable for allocator thread. */
634 mutable std::condition_variable m_allocator_condition;
635
636 /** Condition variable for extent consumer threads. */
637 mutable std::condition_variable m_consumer_condition;
638
639 /** Flag to indicate if the bulk allocator thread should stop. */
640 bool m_stop{false};
641
642 /** Error code, protected by m_mutex */
644
645 /** Innodb dictionary table object. */
648
649 /** Innodb transaction - used for checking interrupt. */
651
652 /** Number of concurrent consumers. */
654};
655
657 public:
658 /** Thread main function.
659 @return innodb error code. */
660 dberr_t run();
661
662 /** Check if work is available for the bulk flusher thread.
663 @return true if work is available. */
664 bool is_work_available();
665
666 /** Start a new thread to do the flush work.
667 @param[in] space_id space for flushing pages to
668 @param[in] index loader index
669 @param[in] queue_size flusher queue size */
670 void start(space_id_t space_id, size_t index, size_t queue_size);
671
672 /** Add a page extent to the bulk flush queue.
673 @param[in,out] page_extent extent to be added to the queue
674 @param[in,out] fn_wait_begin begin callback if wait is needed
675 @param[in,out] fn_wait_end end callback if wait is needed */
676 void add(Page_extent *page_extent, std::function<void()> &fn_wait_begin,
677 std::function<void()> &fn_wait_end);
678
679 /** Check for flusher error and wake up flusher thread.
680 @return Innodb error code. */
682
683 /** Wait till the bulk flush thread stops. */
684 void wait_to_stop();
685
686 /** Get the maximum allowed queue size.
687 @return the maximum allowed queue size. */
688 size_t get_max_queue_size() const { return m_max_queue_size; }
689
690 /** Destructor. */
692
693 /** @return true iff error has occurred. */
694 bool is_error() const { return m_is_error.load(); }
695
696 /** @return error code */
697 dberr_t get_error() const;
698
699 void add_to_free_queue(Page_extent *page_extent);
700
702
703 private:
704 /** Do the actual work of flushing.
705 @param[in,out] node space file node
706 @param[in,out] iov vector IO array
707 @param[in] iov_size vector IO array size */
708 void do_work(fil_node_t *node, void *iov, size_t iov_size);
709
710 /** Check if the bulk flush thread should stop working. */
711 bool should_i_stop() const { return m_stop.load(); }
712
713 /** When no work is available, put the thread to sleep. */
714 void wait();
715
716 /** Print useful information to the server log file while exiting. */
717 void info();
718
719 /** This queue is protected by the m_mutex. */
720 std::vector<Page_extent *> m_queue;
721
722 /** This mutex protects the m_queue. */
723 mutable std::mutex m_mutex;
724
725 /** Condition variable to wait upon. */
726 mutable std::condition_variable m_condition;
727
728 /** This queue is protected by the m_free_mutex. It is used to cache the
729 Page_extent objects that have been flushed and ready for re-use. */
730 std::vector<Page_extent *> m_free_queue;
731
732 /** This mutex protects the m_free_queue. */
733 mutable std::mutex m_free_mutex;
734
735 /** Flag to indicate if the bulk flusher thread should stop. If true, the
736 bulk flusher thread will stop after emptying the queue. If false, the
737 bulk flusher thread will go to sleep after emptying the queue. */
738 std::atomic<bool> m_stop{false};
739
740 /** Set if error is encountered during flush. */
741 std::atomic<bool> m_is_error{false};
742
743 /** Error code, protected by m_mutex */
745
746 /** Set error code.
747 @param[in] error_code error code to set. It could be DB_SUCCESS.*/
748 void set_error(dberr_t error_code);
749
750 /** Private queue (private to the bulk flush thread) containing the extents to
751 flush. */
752 std::vector<Page_extent *> m_priv_queue;
753
754 /** Bulk flusher thread. */
755 std::thread m_flush_thread;
756
757 /** Number of times slept */
758 size_t m_n_sleep{};
759
760 /** Total sleep time in micro seconds. */
761 std::chrono::microseconds m_wait_time;
762
763 /** The sleep duration in milliseconds. */
765
766 /** Maximum queue size, defaults to 4 */
768
769 /** Number of pages flushed. */
771
772 /** Bulk flusher is specific to a tablespace for now. */
774
775 /** Flusher ID. */
776 size_t m_id{};
777
778#ifdef UNIV_DEBUG
779 public:
780 /** Vector of page numbers that are flushed by this Bulk_flusher object. */
781 std::vector<page_no_t> m_flushed_page_nos;
782#endif /* UNIV_DEBUG */
783};
784
785namespace bulk {
786
787class Blob_handle;
788
789/** Used to insert many blobs into InnoDB. */
791 public:
792 /** Constructor.
793 @param[in] btree_load the B-tree into which blobs are inserted. */
794 Blob_inserter(Btree_load &btree_load);
795
797
798 /** Initialize by allocating necessary resources.
799 @return DB_SUCCESS on success or a failure error code. */
800 dberr_t init();
801
802 void finish();
803
805 Blob_context blob_ctx;
806 dberr_t err = open_blob(blob_ctx, ref);
807 if (err != DB_SUCCESS) {
808 return err;
809 }
810 const byte *data = (const byte *)dfield->data;
811 err = write_blob(blob_ctx, ref, data, dfield->len);
812 if (err != DB_SUCCESS) {
813 return err;
814 }
815 return close_blob(blob_ctx, ref);
816 }
817
818 /** Create a blob.
819 @param[out] blob_ctx pointer to an opaque object representing a blob.
820 @param[out] ref blob reference to be placed in the record.
821 @return DB_SUCCESS on success or a failure error code. */
823
824 /** Write data into the blob.
825 @param[in] blob_ctx pointer to blob into which data is written.
826 @param[out] ref blob reference to be placed in the record.
827 @param[in] data buffer containing data to be written
828 @param[in] len length of the data to be written.
829 @return DB_SUCCESS on success or a failure error code. */
830 dberr_t write_blob(Blob_context blob_ctx, lob::ref_t &ref, const byte *data,
831 size_t len);
832
833 /** Indicate that the blob has been completed, so that resources can be
834 removed, and as necessary flushing can be done.
835 @param[in] blob_ctx pointer to blob which has been completely written.
836 @param[out] ref a blob ref object.
837 @return DB_SUCCESS on success or a failure error code. */
839
840 /** Allocate a LOB first page
841 @return a LOB first page. */
843
844 /** Allocate a data page
845 @return a LOB data page. */
847
848 /** Allocate a LOB index page.
849 @return a LOB index page. */
851
852 /** Get the current transaction id.
853 @return the current transaction id. */
854 trx_id_t get_trx_id() const;
855
856 private:
858
860
862
863 /** Page extent from which to allocate first pages of blobs.
864 @ref lob::bulk::first_page_t. */
866
868
869 /** Page extent from which to allocate data pages of blobs.
870 @ref lob::bulk::data_page_t. */
872
873 /** Page extent from which to allocate index pages of blobs.
874 @ref lob::bulk::node_page_t. */
875 std::list<Page_extent *> m_index_extents;
876
877 /** The current blob being inserted. */
879
880 /** Cache of Page_load objects. */
882
883 /** Cache of Page_extent objects. */
885
886 /** Only one blob handle per sub-tree */
888};
889
890} /* namespace bulk */
891
892/** @note We should call commit(false) for a Page_load object, which is not in
893m_page_loaders after page_commit, and we will commit or abort Page_load
894objects in function "finish". */
896 public:
897 /** Merge multiple Btree_load sub-trees together. */
898 class Merger;
899
901 return m_blob_inserter.insert_blob(ref, dfield);
902 }
903
904 /** Create a blob.
905 @param[out] blob_ctx pointer to an opaque object representing a blob.
906 @param[out] ref blob reference to be placed in the record.
907 @return DB_SUCCESS on success or a failure error code. */
909 return m_blob_inserter.open_blob(blob_ctx, ref);
910 }
911
912 /** Write data into the blob.
913 @param[in] blob_ctx pointer to blob into which data is written.
914 @param[in,out] ref blob reference of the current blob
915 @param[in] data buffer containing data to be written
916 @param[in] len length of the data to be written.
917 @return DB_SUCCESS on success or a failure error code. */
918 dberr_t write_blob(Blob_context blob_ctx, lob::ref_t &ref, const byte *data,
919 size_t len) {
920 return m_blob_inserter.write_blob(blob_ctx, ref, data, len);
921 }
922
923 /** Indicate that the blob has been completed, so that resources can be
924 removed, and as necessary flushing can be done.
925 @param[in] blob_ctx pointer to blob which has been completely written.
926 @param[out] ref blob reference of the closed blob.
927 @return DB_SUCCESS on success or a failure error code. */
929 return m_blob_inserter.close_blob(blob_ctx, ref);
930 }
931
932 public:
933 using Page_loaders = std::vector<Page_load *, ut::allocator<Page_load *>>;
934 using Level_ctxs = std::vector<Level_ctx *, ut::allocator<Level_ctx *>>;
935
936 /** Helper to set wait callbacks for the current scope. */
938 public:
939 using Function = std::function<void()>;
940 friend class Btree_load;
941
943 : m_btree_load(btree_load) {
946 }
947
949 m_btree_load->m_fn_wait_begin = nullptr;
950 m_btree_load->m_fn_wait_end = nullptr;
951 }
952
953 private:
954 /** Btree Load for the wait callbacks. */
956 };
957
958 /** Constructor
959 @param[in] index B-tree index.
960 @param[in] trx Transaction object.
961 @param[in] loader_num loader index
962 @param[in] flush_queue_size bulk flusher queue size
963 @param[in] allocator extent allocator */
964 Btree_load(dict_index_t *index, trx_t *trx, size_t loader_num,
965 size_t flush_queue_size,
966 Bulk_extent_allocator &allocator) noexcept;
967
968 /** Destructor */
969 ~Btree_load() noexcept;
970
971 /** Initialize. Allocates the m_heap_order memory heap.
972 @return DB_SUCCESS on success or an error code on failure. */
973 dberr_t init();
974
975#ifdef UNIV_DEBUG
976 /** Save flushed page numbers for debugging purposes.
977 @param[in] page_no page number of the page that is flushed. */
979 m_bulk_flusher.m_flushed_page_nos.push_back(page_no);
980 }
981#endif /* UNIV_DEBUG */
982
983 /** Check if the index build operation has been interrupted.
984 @return true if the index build operation is interrupted, false otherwise.*/
985 bool is_interrupted() const;
986
987 /** Trigger flusher thread and check for error.
988 @return Innodb error code. */
990
991 bool is_pk() const { return m_index->is_clustered(); }
992
993 /** Get the index object.
994 @return index object. */
995 dict_index_t *index() const { return m_index; }
996
997 const char *get_table_name() const { return m_index->table->name.m_name; }
998
999 /** Get the root page number of this tree/subtree.
1000 @return the root page number of this tree/subtree. */
1002
1003 /** Get the level of the root page.
1004 @return the level of the root page. */
1005 size_t get_root_level() const { return m_root_level; }
1006
1007 /** Get information about root page. */
1008 void get_root_page_stat(Page_stat &stat);
1009
1010 /** Get the transaction id.
1011 @return the transaction id. */
1012 trx_id_t get_trx_id() const;
1013
1014 /** Btree bulk load finish. We commit the last page in each level
1015 and copy the last page in top level to the root page of the index
1016 if no error occurs.
1017 @param[in] is_err Whether bulk load was successful until now
1018 @param[in] subtree true if a subtree is being built, false otherwise.
1019 @return error code */
1020 [[nodiscard]] dberr_t finish(bool is_err, const bool subtree) noexcept;
1021
1022 /** Insert a tuple to a page in a level
1023 @param[in] dtuple Tuple to insert
1024 @param[in] level B-tree level
1025 @return error code */
1026 [[nodiscard]] dberr_t insert(dtuple_t *dtuple, size_t level) noexcept;
1027
1028 /** Split the right most block of the tree at the given level.
1029 @param[in,out] block the right most block at the given level.
1030 @param[in] level level of the given block.
1031 @param[in] node_ptr node pointer to be inserted in the block after
1032 splitting.
1033 @param[in] mtr mini transaction context.
1034 @param[in,out] highest_level highest level among all the subtrees.*/
1035 void split_rightmost(buf_block_t *block, size_t level, dtuple_t *node_ptr,
1036 mtr_t *mtr, size_t &highest_level);
1037
1038 /** Split the left most block of the tree at the given level.
1039 @param[in,out] block the left most block at the given level. it will be
1040 updated with the new left most block.
1041 @param[in] level level of the given block.
1042 @param[in] node_ptr node pointer to be inserted in the block after
1043 splitting.
1044 @param[in] mtr mini transaction context.
1045 @param[in,out] highest_level highest level among all the subtrees.*/
1046 void split_leftmost(buf_block_t *&block, size_t level, dtuple_t *node_ptr,
1047 mtr_t *mtr, size_t &highest_level);
1048
1049 private:
1050 /** Set the root page on completion.
1051 @param[in] last_page_no Last page number (the new root).
1052 @return DB_SUCCESS or error code. */
1053 [[nodiscard]] dberr_t load_root_page(page_no_t last_page_no) noexcept;
1054
1055 public:
1056 /** Commit(finish) a page. We set next/prev page no, insert a node pointer to
1057 father page if needed, and commit mini-transaction.
1058 @param[in] page_load Page to commit
1059 @param[in] next_page_load Next page
1060 @param[in] insert_father Flag whether need to insert node ptr
1061 @return error code */
1062 [[nodiscard]] dberr_t page_commit(Page_load *page_load,
1063 Page_load *next_page_load,
1064 bool insert_father) noexcept;
1065
1066 /** Prepare space to insert a tuple.
1067 @param[in,out] page_load Page bulk that will be used to store the record.
1068 It may be replaced if there is not enough space
1069 to hold the record.
1070 @param[in] level B-tree level
1071 @param[in] rec_size Record size
1072 @return error code */
1073 [[nodiscard]] dberr_t prepare_space(Page_load *&page_load, size_t level,
1074 size_t rec_size) noexcept;
1075
1076 /** Insert a tuple to a page.
1077 @param[in] page_load Page bulk object
1078 @param[in] tuple Tuple to insert
1079 @param[in] big_rec Big record vector, maybe NULL if there is no
1080 Data to be stored externally.
1081 @param[in] rec_size Record size
1082 @return error code */
1083 [[nodiscard]] dberr_t insert(Page_load *page_load, dtuple_t *tuple,
1084 big_rec_t *big_rec, size_t rec_size) noexcept;
1085
1086 /** Btree page bulk load finish. Commits the last page in each level
1087 if no error occurs. Also releases all page bulks.
1088 @param[in] is_err Whether bulk load was successful until now
1089 @param[out] last_page_no Last page number
1090 @return error code */
1091 [[nodiscard]] dberr_t finalize_page_loads(bool is_err,
1092 page_no_t &last_page_no) noexcept;
1093
1094 public:
1095 /** Allocate an extent.
1096 @param[in,out] page_range the range of pages allocated.
1097 @param[in] level btree level for which pages are allocated.
1098 @return status code. */
1099 dberr_t alloc_extent(Page_range_t &page_range, size_t level);
1100
1101 /** Initiate a direct file write operation.
1102 @param[in] block block to be written to disk.
1103 @return error code. */
1104 [[nodiscard]] dberr_t fil_io(buf_block_t *block) noexcept;
1105
1106 /** Flush the blob pages.
1107 @return status code. */
1108 [[nodiscard]] dberr_t flush_blobs() noexcept;
1109
 1110 /** Add the given block to the internal cache of blocks.
1111 @param[in] block the block to be cached. */
1112 inline void block_put(buf_block_t *block);
1113
1114 /** Remove the given block from the internal cache of blocks.
1115 @param[in] page_no the page number of block to be removed from cache. */
1116 inline void block_remove(const page_no_t page_no);
1117
1118 /** Search for a BUF_BLOCK_MEMORY block with given page number in the local
1119 cache.
1120 @param[in] page_no the page number of block to be fetched.
1121 @return buffer block with given page number. */
1122 [[nodiscard]] inline buf_block_t *block_get(page_no_t page_no) const noexcept;
1123
1124 /** Evict all the pages in the given range from the buffer pool.
1125 @param[in] range range of page numbers.
1126 @param[in] dirty_is_ok it is OK for a page to be dirty. */
1127 void force_evict(const Page_range_t &range, const bool dirty_is_ok = true);
1128
1129 public:
1130 /** Check if a new level is needed. */
1131 bool is_new_level(size_t level) const { return level >= m_level_ctxs.size(); }
1132
1133 /** Last page numbers of each level. */
1134 std::vector<page_no_t, ut::allocator<page_no_t>> m_last_page_nos{};
1135
1136 /** First page numbers of each level. */
1137 std::vector<page_no_t, ut::allocator<page_no_t>> m_first_page_nos{};
1138
1139 /** Get the level context object.
1140 @param[in] level the level number. level 0 is leaf level.
1141 @return the level context object. */
1142 Level_ctx *get_level(size_t level) const;
1143
 1144 /** Page numbers of the pages that have been allocated in the leaf level.
1145 The page range is [p1, p2), where p2 is not included. */
1147
 1148 /** Page numbers of the pages that have been allocated in the non-leaf level.
1149 The page range is [p1, p2), where p2 is not included. */
1151
1154
1155 /** State of the index. Used for asserting at the end of a
1156 bulk load operation to ensure that the online status of the
1157 index does not change */
1159
1160 /** Number of extents allocated for this B-tree. */
1162
1163 /** Number of pages allocated for this B-tree. */
1165
1166 public:
1167 std::ostream &print_left_pages(std::ostream &out) const;
1168 std::ostream &print_right_pages(std::ostream &out) const;
1169
1170 dberr_t check_key_overlap(const Btree_load *r_btree) const;
1171
1172#ifdef UNIV_DEBUG
1173 void print_tree_pages() const;
1174 std::string print_pages_in_level(const size_t level) const;
1175 /** Check size and validate index of limited size.
1176 @param[in] index Index to validate
1177 @return true if successful. */
1178 static bool validate_index(dict_index_t *index);
1179#endif /* UNIV_DEBUG */
1180
1181 /** All allocated extents registers with Btree_load. */
1182 void track_extent(Page_extent *page_extent);
1183
1184 /** Add fully used extents to the bulk flusher. Call this whenever a new
1185 Page_load is allocated, with finish set to false. Only in
1186 Btree_load::finish(), the finish argument will be true.
1187 @param[in] finish if true, add all the tracked extents to the bulk flusher,
1188 irrespective of whether it is fully used or not. */
1189 void add_to_bulk_flusher(bool finish = false);
1190
1191 /** Add the given page extent object to the bulk flusher.
1192 @param[in] page_extent the extent to be flushed. */
1193 void add_to_bulk_flusher(Page_extent *page_extent);
1194
1195 /** Check if transparent page compression (TPC) is enabled.
1196 @return true if TPC is enabled. */
1197 bool is_tpc_enabled() const;
1198
1199 /** Check if transparent page encryption (TPE) is enabled.
1200 @return true if TPE is enabled. */
1201 bool is_tpe_enabled() const;
1202
1203 /** @return get flush queue size limit. */
1206 }
1207
1208 /** If the data is already sorted and checked for duplicates, then we can
1209 disable doing it again. */
1211
1212 private:
1213 /** Page allocation type. We allocate in extents by default. */
1216
1217 /** Number of records inserted. */
1218 uint64_t m_n_recs{};
1219
1220 /** B-tree index */
1222
1224
1225 /** Transaction id */
1227
1228 /** Root page level */
1230
1231 private:
1232 /** Context information for each level of the B-tree. The leaf level is at
1233 m_level_ctxs[0]. */
1235
1236 /** Reference to global extent allocator. */
1238
1239 /** Extents that are being tracked. */
1240 std::list<Page_extent *> m_extents_tracked;
1241
1242 /** If true, check if data is inserted in sorted order. */
1243 bool m_check_order{true};
1244
1245 /** Memory heap to be used for sort order checks. */
1247
1248 /** Function object to compare two tuples. */
1250
1251 /** The previous tuple that has been inserted. */
1253
1254 bool is_extent_tracked(const Page_extent *page_extent) const;
1255
1256 /** Loader number. */
1258
1260
1261 /* Begin wait callback function. */
1263
1264 /* End wait callback function. */
1266
1267 /** Blob inserter that will be used to handle all the externally stored
1268 fields of InnoDB. */
1270
1271 /* Dedicated thread to flush pages. */
1273
1275};
1276
1278 public:
1279 using Btree_loads = std::vector<Btree_load *, ut::allocator<Btree_load *>>;
1280
1282 : m_btree_loads(loads),
1283 m_index(index),
1284 m_trx(trx),
1286
1287 dberr_t merge(bool sort);
1288
1289 private:
1290 /** Get the maximum free space available in an empty page in bytes.
1291 @return the maximum free space available in an empty page. */
1292 size_t get_max_free() const {
1294 }
1295
1296 /** Remove any empty sub-trees with no records. */
1297 void remove_empty_subtrees();
1298
1299#ifdef UNIV_DEBUG
1300 /** Validate sub-tree boundaries. */
1301 void validate_boundaries();
1302
1303#endif /* UNIV_DEBUG */
1304
 1305 /** Stitch sub-trees together to form a tree with one or multiple
 1306 nodes at the highest level.
1307 @param[out] highest_level highest level of the merged tree.
1308 @return innodb error code. */
1309 dberr_t subtree_link_levels(size_t &highest_level);
1310
 1311 /** Create root node for the stitched sub-trees by combining the nodes
1312 at highest level creating another level if required.
1313 @param[in] highest_level highest level of the merged tree.
1314 @return innodb error code. */
1315 dberr_t add_root_for_subtrees(const size_t highest_level);
1316
1317 /** Insert the given list of node pointers into pages at the given level.
1318 @param[in,out] all_node_ptrs list of node pointers
1319 @param[in,out] total_node_ptrs_size total space in bytes needed to insert
1320 all the node pointers.
1321 @param[in] level the level at which the node pointers are inserted.
1322 @return DB_SUCCESS if successful.
1323 @return error code on failure. */
1324 dberr_t insert_node_ptrs(std::vector<dtuple_t *> &all_node_ptrs,
1325 size_t &total_node_ptrs_size, size_t level);
1326
1327 /** Load the left page and update its FIL_PAGE_NEXT.
1328 @param[in] l_page_no left page number
1329 @param[in] r_page_no right page number. */
1330 void link_right_sibling(const page_no_t l_page_no, const page_no_t r_page_no);
1331
1332 private:
 1333 /** Reference to the subtrees to be merged. */
1335
1336 /** Index which is being built. */
1338
1339 /** Transaction making the changes. */
1340 const trx_t *m_trx{};
1341
1342 /** Memory heap to store node pointers. */
1344};
1345
1347 const Page_extent *page_extent) const {
1348 for (auto e : m_extents_tracked) {
1349 if (page_extent == e) {
1350 return true;
1351 }
1352 }
1353 return false;
1354}
1355
1356/** The proper function call sequence of Page_load is as below:
1357-- Page_load::init
1358-- Page_load::insert
1359-- Page_load::finish
1360-- Page_load::commit */
1362 public:
1364
1365 /** Ctor.
1366 @param[in] index B-tree index
1367 @param[in] btree_load btree object to which this page belongs. */
1368 Page_load(dict_index_t *index, Btree_load *btree_load);
1369
1370 /** Destructor. */
1371 ~Page_load() noexcept;
1372
1373 /** Check if page is corrupted.
1374 @return true if corrupted, false otherwise. */
1375 bool is_corrupted() const;
1376
1377 /** Print the child page numbers. */
1378 void print_child_page_nos() noexcept;
1379
1380 /** Check if state of this page is BUF_BLOCK_MEMORY.
1381 @return true if page state is BUF_BLOCK_MEMORY, false otherwise.*/
1382 bool is_memory() const { return m_block->is_memory(); }
1383
1384 /** A static member function to create this object.
1385 @param[in] btree_load the bulk load object to which this Page_load belongs.
1386 @param[in] page_extent page extent to which this page belongs. */
1387 static Page_load *create(Btree_load *btree_load, Page_extent *page_extent);
1388
1389 /** Release the page loader. Delete if not cached.
1390 @param[in] page_load page loader to delete. */
1391 static void drop(Page_load *page_load);
1392
1393 /** Constructor
1394 @param[in] index B-tree index
1395 @param[in] trx_id Transaction id
1396 @param[in] page_no Page number
1397 @param[in] level Page level
1398 @param[in] observer Flush observer
1399 @param[in] btree_load btree object to which this page belongs. */
1401 size_t level, Flush_observer *observer,
1402 Btree_load *btree_load = nullptr) noexcept
1403 : m_index(index),
1404 m_trx_id(trx_id),
1405 m_page_no(page_no),
1406 m_level(level),
1408 m_flush_observer(observer),
1409 m_btree_load(btree_load) {
1411 }
1412
1413 /** Set the transaction id.
1414 @param[in] trx_id the transaction id to used. */
1415 void set_trx_id(const trx_id_t trx_id) { m_trx_id = trx_id; }
1416
1417 /** Get the current transaction identifier.
1418 @return the current transaction identifier.*/
1419 trx_id_t get_trx_id() const { return m_trx_id; }
1420
1421 /** Set the flush observer.
1422 @param[in] observer the flush observer object to use. */
1424 m_flush_observer = observer;
1425 }
1426
1427 bool is_leaf() const { return m_level == 0; }
1428
1429 /** Set the page number of this object. */
1430 void set_page_no(const page_no_t page_no);
1431
1432 void set_leaf_seg(const fseg_header_t *hdr) {
1434 }
1435 void set_top_seg(const fseg_header_t *hdr) {
1437 }
1438
1439 /** Initialize members and allocate page if needed and start mtr.
1440 @note Must be called and only once right after constructor.
1441 @return error code */
1442 [[nodiscard]] dberr_t init() noexcept;
1443 [[nodiscard]] dberr_t init_mem(const page_no_t new_page_no,
1444 Page_extent *page_extent) noexcept;
1445
1446 /** Initialize a memory block to be used for storing blobs.
1447 @param[in] page_no the page number to be set in the memory block.
1448 @param[in] page_extent extent to which this page belongs.
1449 @return DB_SUCCESS on success, error code on failure.*/
1450 [[nodiscard]] dberr_t init_mem_blob(const page_no_t page_no,
1451 Page_extent *page_extent) noexcept;
1452
1453 /** Allocate a page for this Page_load object.
1454 @return DB_SUCCESS on success, error code on failure. */
1455 dberr_t alloc() noexcept;
1456
1457 /** Re-initialize this page. */
1458 [[nodiscard]] dberr_t reinit() noexcept;
1459
1460 /** Reset this object so that Page_load::init() can be called again on this
1461 object. */
1462 void reset() noexcept;
1463
1464 /** Insert a tuple in the page.
1465 @param[in] tuple Tuple to insert
1466 @param[in] big_rec External record
1467 @param[in] rec_size Record size
1468 @return error code */
1469 [[nodiscard]] dberr_t insert(const dtuple_t *tuple, const big_rec_t *big_rec,
1470 size_t rec_size) noexcept;
1471
1472 /** Mark end of insertion to the page. Scan records to set page dirs,
1473 and set page header members. The scan is incremental (slots and records
 1474 whose assignment could be "finalized" are not checked again. Check the
1475 m_slotted_rec_no usage, note it could be reset in some cases like
1476 during split.
1477 Note: we refer to page_copy_rec_list_end_to_created_page.*/
1478 void finish() noexcept;
1479
1480 /** Commit mtr for a page
1481 @return DB_SUCCESS on success, error code on failure. */
1482 dberr_t commit() noexcept;
1483
1484 /** Commit mtr for a page */
1485 void rollback() noexcept;
1486
1487 /** Check whether the record needs to be stored externally.
1488 @return false if the entire record can be stored locally on the page */
1489 [[nodiscard]] bool need_ext(const dtuple_t *tuple,
1490 size_t rec_size) const noexcept;
1491
1492 /** Store externally the first possible field of the given tuple.
1493 @return true if a field was stored externally, false if it was not possible
1494 to store any of the fields externally. */
1495 [[nodiscard]] bool make_ext(dtuple_t *tuple);
1496
1497 /** Get node pointer
1498 @return node pointer */
1499 [[nodiscard]] dtuple_t *get_node_ptr() noexcept;
1500
1501 /** Get node pointer
1502 @param[in] heap allocate node pointer in the given heap.
1503 @return node pointer */
1504 [[nodiscard]] dtuple_t *get_node_ptr(mem_heap_t *heap) noexcept;
1505
1506 /** Copy all records from page.
1507 @param[in] src_page Page with records to copy. */
1508 size_t copy_all(const page_t *src_page) noexcept;
1509
1510 /** Distribute all records from this page to the given pages.
1511 @param[in,out] to_pages array of Page_load objects.
1512 return total number of records processed. */
1513 size_t copy_to(std::vector<Page_load *> &to_pages);
1514
1515 /** Set next page
1516 @param[in] next_page_no Next page no */
1517 void set_next(page_no_t next_page_no) noexcept;
1518
1519 /** Set previous page
1520 @param[in] prev_page_no Previous page no */
1521 void set_prev(page_no_t prev_page_no) noexcept;
1522
1523 /** Get previous page (FIL_PAGE_PREV). */
1524 page_no_t get_prev() noexcept;
1525
1526 /** Start mtr and latch block */
1527 void latch() noexcept;
1528
1529 /** Check if required space is available in the page for the rec
1530 to be inserted. We check fill factor & padding here.
1531 @param[in] rec_size Required space
1532 @return true if space is available */
1533 [[nodiscard]] inline bool is_space_available(size_t rec_size) const noexcept;
1534
1535 /** Get the page number of this page load object.
1536 @return the page number of this page load object. */
1537 [[nodiscard]] page_no_t get_page_no() const noexcept { return m_page_no; }
1538
1539 [[nodiscard]] page_id_t get_page_id() const noexcept {
1540 return m_block->page.id;
1541 }
1542
1543 /** Get the physical page size of the underlying tablespace.
1544 @return the physical page size of the tablespace. */
1545 size_t get_page_size() const noexcept;
1546
1547 /** Get the table space ID.
1548 @return the table space ID. */
1549 space_id_t space() const noexcept;
1550
1551#ifdef UNIV_DEBUG
1552 /** Obtain tablespace id from the frame and the buffer block and ensure that
1553 they are the same.
1554 @return true if space id is same in both places. */
1555 bool verify_space_id() const;
1556#endif /* UNIV_DEBUG */
1557
1558 /** Get page level */
1559 [[nodiscard]] size_t get_level() const noexcept { return m_level; }
1560
1561 /** Set the level of this page. */
1562 void set_level(size_t level) noexcept { m_level = level; }
1563
1564 /** Get record no */
1565 [[nodiscard]] size_t get_rec_no() const { return m_rec_no; }
1566
1567 /** Get page */
1568 [[nodiscard]] const page_t *get_page() const noexcept {
1570 }
1571
1572 [[nodiscard]] page_t *get_page() noexcept {
1574 }
1575
1576 public:
1577 void init_for_writing();
1578 size_t get_data_size() const { return page_get_data_size(m_page); }
1579
1580#ifdef UNIV_DEBUG
1581 /** Check if index is X locked
1582 @return true if index is locked. */
1583 bool is_index_locked() noexcept;
1584#endif /* UNIV_DEBUG */
1585
1586 /** Copy given and all following records.
1587 @param[in] first_rec First record to copy */
1588 size_t copy_records(const rec_t *first_rec) noexcept;
1589
1590 /** Insert a record in the page, check for duplicates too.
1591 @param[in] rec Record
1592 @param[in] offsets Record offsets
1593 @return DB_SUCCESS or error code. */
1594 dberr_t insert(const rec_t *rec, Rec_offsets offsets) noexcept;
1595
1596 public:
1597 /** Store external record
1598 Since the record is not logged yet, so we don't log update to the record.
1599 the blob data is logged first, then the record is logged in bulk mode.
1600 @param[in] big_rec External record
1601 @param[in] offsets Record offsets
1602 @return error code */
1603 [[nodiscard]] dberr_t store_ext(const big_rec_t *big_rec,
1604 Rec_offsets offsets) noexcept;
1605
1606 /** Set the REC_INFO_MIN_REC_FLAG on the first user record in this page.
1607 @param[in] mtr mini transaction context. */
1608 void set_min_rec_flag(mtr_t *mtr);
1609
1610 /** Set the REC_INFO_MIN_REC_FLAG on the first user record in this page. */
1611 void set_min_rec_flag();
1612 bool is_min_rec_flag() const;
1613
1614 /** Set the level context object for this page load
1615 @param[in] level_ctx the level context object. */
1616 void set_level_ctx(Level_ctx *level_ctx) { m_level_ctx = level_ctx; }
1617
1618 /** Check if this page load object contains a level context object.
1619 @return true if the page load contains a level context object.
1620 @return false if the page load does NOT contain a level context object.*/
1621 bool has_level_ctx() const { return m_level_ctx != nullptr; }
1622
1623 /** Free the memory block. */
1624 void free();
1625
1627
1629
1630 void set_page_extent(Page_extent *page_extent) {
1631 m_page_extent = page_extent;
1632 }
1633
1634 /** Mark the Page load as cached. Flush thread should not free this Page. */
1635 void set_cached() { m_is_cached.store(true); }
1636
1637 /** @return true iff it is a cached Page Load. */
1638 bool is_cached() const { return m_is_cached.load(); }
1639
1640 private:
1641 /** Memory heap for internal allocation */
1643
1644 /** The index B-tree */
1646
1647 /** The min-transaction */
1649
1650 /** The transaction id */
1652
1653 /** The buffer block */
1655
1656 /** The page */
1658
1659 /** The current rec, just before the next insert rec */
1661
1662 /** The page no */
1664
1665 /** The page level in B-tree */
1666 size_t m_level{};
1667
1668 /** Flag: is page in compact format */
1669 const bool m_is_comp{};
1670
1671 /** The heap top in page for next insert */
1672 byte *m_heap_top{};
1673
1674 /** User record no */
1675 size_t m_rec_no{};
1676
1677 /** The free space left in the page */
1679
1680 /** The reserved space for fill factor */
1682
1683 /** Total data in the page */
1685
1686 /** The modify clock value of the buffer block
1687 when the block is re-pinned */
1688 uint64_t m_modify_clock{};
1689
1690 /** Flush observer */
1692
1693 /** Last record assigned to a slot. */
1695
1696 /** Number of records assigned to slots. */
1698
1699 /** Page modified flag. */
1701
1703
1705
1707
 1708 /** true iff the Page load is cached. */
1709 std::atomic_bool m_is_cached{false};
1710
1711 friend class Btree_load;
1712};
1713
1715 return get_node_ptr(m_heap);
1716}
1717
1718inline space_id_t Page_load::space() const noexcept { return m_index->space; }
1719
1720inline size_t Page_load::get_page_size() const noexcept {
1721 const page_size_t page_size = m_index->get_page_size();
1722 return page_size.physical();
1723}
1724
1725inline Level_ctx *Btree_load::get_level(size_t level) const {
1726 ut_a(m_level_ctxs.size() > level);
1727 return m_level_ctxs[level];
1728}
1729
1730/** Information about a buffer page. */
1732 /** Number of user records in the page. */
1733 size_t m_n_recs;
1734
1735 /** Number of bytes of data. */
1737};
1738
1739inline void Page_extent::append(Page_load *page_load) {
1740 ut_ad(page_load->get_block() != nullptr);
1741 ut_ad(page_load->is_memory());
1742 ut_ad(page_load->get_page_no() >= m_range.first);
1743 ut_ad(page_load->get_page_no() < m_range.second);
1744 for (auto &iter : m_page_loads) {
1745 if (iter->get_page_no() == page_load->get_page_no()) {
1746 /* Page already appended. Don't append again. */
1747 return;
1748 }
1749 }
1751 m_page_loads.push_back(page_load);
1752}
1753
1755 return m_btree_load->get_trx_id();
1756}
1757
1759 return m_btree_load->index()->space;
1760}
1761
1762inline Page_extent::Page_extent(Btree_load *btree_load, const bool is_leaf)
1763 : m_page_no(FIL_NULL),
1764 m_range(FIL_NULL, FIL_NULL),
1765 m_btree_load(btree_load),
1766 m_is_leaf(is_leaf) {
1768}
1769
1771 const bool is_leaf, bool skip_track) {
1772 Page_extent *p = ut::new_withkey<Page_extent>(UT_NEW_THIS_FILE_PSI_KEY,
1773 btree_load, is_leaf);
1774 if (!skip_track) {
1775 btree_load->track_extent(p);
1776 }
1777 p->m_is_cached.store(false);
1778 return p;
1779}
1780
1781inline void Page_extent::drop(Page_extent *extent) {
1782 if (extent == nullptr) {
1783 return;
1784 }
1785 if (extent->is_cached()) {
1786 ut_a(!extent->is_free());
1787 bool free = true;
1788 extent->set_state(free);
1789 return;
1790 }
1791 ut::delete_(extent);
1792}
1793
1794/** Function object to compare two Btree_load objects. */
1797 bool operator()(const Btree_load *l_btree, const Btree_load *r_btree);
1799};
1800
1801#ifdef UNIV_DEBUG
1804#endif /* UNIV_DEBUG */
1805
1807 for (auto page_load : m_page_loads) {
1808 page_load->free();
1809 }
1810}
1811
1812namespace bulk {
1814 return m_btree_load.get_trx_id();
1815}
1816} /* namespace bulk */
1817
1818} /* namespace Btree_multi */
1819
1820#endif /* btr0mtib_h */
uint32_t space_id_t
Tablespace identifier.
Definition: api0api.h:48
uint32_t page_no_t
Page number.
Definition: api0api.h:46
Kerberos Client Authentication nullptr
Definition: auth_kerberos_client_plugin.cc:247
std::pair< page_no_t, page_no_t > Page_range_t
Definition: btr0btr.h:131
The B-tree bulk load.
static buf_frame_t * buf_block_get_frame(const buf_block_t *block)
Gets a pointer to the memory frame of a block.
Definition: btr0load.h:51
Definition: btr0mtib.h:1277
Btree_loads & m_btree_loads
Reference to the subtrees to be merged.
Definition: btr0mtib.h:1334
std::vector< Btree_load *, ut::allocator< Btree_load * > > Btree_loads
Definition: btr0mtib.h:1279
const trx_t * m_trx
Transaction making the changes.
Definition: btr0mtib.h:1340
dict_index_t * m_index
Index which is being built.
Definition: btr0mtib.h:1337
void validate_boundaries()
Validate sub-tree boundaries.
Definition: btr0mtib.cc:2698
dberr_t insert_node_ptrs(std::vector< dtuple_t * > &all_node_ptrs, size_t &total_node_ptrs_size, size_t level)
Insert the given list of node pointers into pages at the given level.
Definition: btr0mtib.cc:3164
Merger(Btree_loads &loads, dict_index_t *index, const trx_t *trx)
Definition: btr0mtib.h:1281
size_t get_max_free() const
Get the maximum free space available in an empty page in bytes.
Definition: btr0mtib.h:1292
void remove_empty_subtrees()
Remove any empty sub-trees with no records.
Definition: btr0mtib.cc:2685
dberr_t add_root_for_subtrees(const size_t highest_level)
Create root node for the stitched sub-trees by combining the nodes at highest level creating another l...
Definition: btr0mtib.cc:3006
Scoped_heap m_tuple_heap
Memory heap to store node pointers.
Definition: btr0mtib.h:1343
dberr_t merge(bool sort)
Definition: btr0mtib.cc:2631
void link_right_sibling(const page_no_t l_page_no, const page_no_t r_page_no)
Load the left page and update its FIL_PAGE_NEXT.
Definition: btr0mtib.cc:3136
dberr_t subtree_link_levels(size_t &highest_level)
Stitch sub-trees together to form a tree with one or multiple nodes at the highest level.
Definition: btr0mtib.cc:2708
Helper to set wait callbacks for the current scope.
Definition: btr0mtib.h:937
~Wait_callbacks()
Definition: btr0mtib.h:948
Wait_callbacks(Btree_load *btree_load, Function &begin, Function &end)
Definition: btr0mtib.h:942
Btree_load * m_btree_load
Btree Load for the wait callbacks.
Definition: btr0mtib.h:955
std::function< void()> Function
Definition: btr0mtib.h:939
Definition: btr0mtib.h:895
~Btree_load() noexcept
Destructor.
Definition: btr0mtib.cc:1640
dict_index_t * m_index
B-tree index.
Definition: btr0mtib.h:1221
Bulk_flusher m_bulk_flusher
Definition: btr0mtib.h:1272
dberr_t finish(bool is_err, const bool subtree) noexcept
Btree bulk load finish.
Definition: btr0mtib.cc:1978
dberr_t load_root_page(page_no_t last_page_no) noexcept
Set the root page on completion.
Definition: btr0mtib.cc:1854
dberr_t trigger_flusher() const
Trigger flusher thread and check for error.
Definition: btr0mtib.h:989
bool is_tpe_enabled() const
Check if transparent page encryption (TPE) is enabled.
Definition: btr0mtib.cc:2623
Bulk_extent_allocator & m_allocator
Reference to global extent allocator.
Definition: btr0mtib.h:1237
bool is_new_level(size_t level) const
Check if a new level is needed.
Definition: btr0mtib.h:1131
bool is_pk() const
Definition: btr0mtib.h:991
dberr_t check_key_overlap(const Btree_load *r_btree) const
Definition: btr0mtib.cc:3264
Btree_load(dict_index_t *index, trx_t *trx, size_t loader_num, size_t flush_queue_size, Bulk_extent_allocator &allocator) noexcept
Constructor.
Definition: btr0mtib.cc:1622
dict_index_t * index() const
Get the index object.
Definition: btr0mtib.h:995
size_t get_max_flush_queue_size() const
Definition: btr0mtib.h:1204
void block_remove(const page_no_t page_no)
Remove the given block from the internal cache of blocks.
dberr_t insert(dtuple_t *dtuple, size_t level) noexcept
Insert a tuple to a page in a level.
Definition: btr0mtib.cc:1750
dtuple_t * m_prev_tuple
The previous tuple that has been inserted.
Definition: btr0mtib.h:1252
void force_evict(const Page_range_t &range, const bool dirty_is_ok=true)
Evict all the pages in the given range from the buffer pool.
Definition: btr0mtib.cc:2077
std::ostream & print_right_pages(std::ostream &out) const
Definition: btr0mtib.cc:2038
size_t m_root_level
Root page level.
Definition: btr0mtib.h:1229
std::vector< page_no_t, ut::allocator< page_no_t > > m_first_page_nos
First page numbers of each level.
Definition: btr0mtib.h:1137
byte m_fseg_hdr_leaf[FSEG_HEADER_SIZE]
Definition: btr0mtib.h:1152
void block_put(buf_block_t *block)
Add the given block to the internal cache of blocks.
dberr_t init()
Initialize.
Definition: btr0mtib.cc:2170
Bulk_extent_allocator::Type m_alloc_type
Page allocation type.
Definition: btr0mtib.h:1214
Page_range_t m_page_range_leaf
Page numbers of the pages that have been allocated in the leaf level.
Definition: btr0mtib.h:1146
void get_root_page_stat(Page_stat &stat)
Get information about root page.
Definition: btr0mtib.cc:2088
size_t m_loader_num
Loader number.
Definition: btr0mtib.h:1257
Level_ctxs m_level_ctxs
Context information for each level of the B-tree.
Definition: btr0mtib.h:1234
page_no_t get_subtree_root() const
Get the root page number of this tree/subtree.
Definition: btr0mtib.h:1001
size_t m_stat_n_pages
Number of pages allocated for this B-tree.
Definition: btr0mtib.h:1164
trx_t * m_trx
Transaction id.
Definition: btr0mtib.h:1226
dberr_t flush_blobs() noexcept
Flush the blob pages.
dberr_t open_blob(Blob_context &blob_ctx, lob::ref_t &ref)
Create a blob.
Definition: btr0mtib.h:908
mem_heap_t * m_heap_order
Memory heap to be used for sort order checks.
Definition: btr0mtib.h:1246
dberr_t prepare_space(Page_load *&page_load, size_t level, size_t rec_size) noexcept
Prepare space to insert a tuple.
Definition: btr0mtib.cc:1648
std::list< Page_extent * > m_extents_tracked
Extents that are being tracked.
Definition: btr0mtib.h:1240
void track_page_flush(page_no_t page_no)
Save flushed page numbers for debugging purposes.
Definition: btr0mtib.h:978
std::ostream & print_left_pages(std::ostream &out) const
Definition: btr0mtib.cc:2029
ddl::Compare_key m_compare_key
Function object to compare two tuples.
Definition: btr0mtib.h:1249
bulk::Blob_inserter m_blob_inserter
Blob inserter that will be used to handle all the externally stored fields of InnoDB.
Definition: btr0mtib.h:1269
size_t get_root_level() const
Get the level of the root page.
Definition: btr0mtib.h:1005
dberr_t alloc_extent(Page_range_t &page_range, size_t level)
Allocate an extent.
Definition: btr0mtib.cc:735
bool is_extent_tracked(const Page_extent *page_extent) const
Definition: btr0mtib.h:1346
dberr_t insert_blob(lob::ref_t &ref, const dfield_t *dfield)
Definition: btr0mtib.h:900
bool m_check_order
If true, check if data is inserted in sorted order.
Definition: btr0mtib.h:1243
bool is_tpc_enabled() const
Check if transparent page compression (TPC) is enabled.
Definition: btr0mtib.cc:2613
std::vector< Page_load *, ut::allocator< Page_load * > > Page_loaders
Definition: btr0mtib.h:933
static bool validate_index(dict_index_t *index)
Check size and validate index of limited size.
Definition: btr0mtib.cc:1958
trx_id_t get_trx_id() const
Get the transaction id.
Definition: btr0mtib.cc:1638
void disable_check_order()
If the data is already sorted and checked for duplicates, then we can disable doing it again.
Definition: btr0mtib.h:1210
bool is_interrupted() const
Check if the index build operation has been interrupted.
Definition: btr0mtib.cc:3329
uint64_t m_n_recs
Number of records inserted.
Definition: btr0mtib.h:1218
Wait_callbacks::Function m_fn_wait_begin
Definition: btr0mtib.h:1262
const char * get_table_name() const
Definition: btr0mtib.h:997
void track_extent(Page_extent *page_extent)
All allocated extents registers with Btree_load.
Definition: btr0mtib.cc:2114
void split_rightmost(buf_block_t *block, size_t level, dtuple_t *node_ptr, mtr_t *mtr, size_t &highest_level)
Split the right most block of the tree at the given level.
Definition: btr0mtib.cc:3346
fil_space_t * m_space
Definition: btr0mtib.h:1223
Page_range_t m_page_range_top
Page numbers of the pages that has been allocated in the non-leaf level.
Definition: btr0mtib.h:1150
Wait_callbacks::Function m_fn_wait_end
Definition: btr0mtib.h:1265
void add_to_bulk_flusher(bool finish=false)
Add fully used extents to the bulk flusher.
Definition: btr0mtib.cc:1727
unsigned m_index_online
State of the index.
Definition: btr0mtib.h:1158
byte m_fseg_hdr_top[FSEG_HEADER_SIZE]
Definition: btr0mtib.h:1153
dberr_t page_commit(Page_load *page_load, Page_load *next_page_load, bool insert_father) noexcept
Commit(finish) a page.
Definition: btr0mtib.cc:1581
void split_leftmost(buf_block_t *&block, size_t level, dtuple_t *node_ptr, mtr_t *mtr, size_t &highest_level)
Split the left most block of the tree at the given level.
Definition: btr0mtib.cc:3445
dberr_t fil_io(buf_block_t *block) noexcept
Initiate a direct file write operation.
dberr_t finalize_page_loads(bool is_err, page_no_t &last_page_no) noexcept
Btree page bulk load finish.
Definition: btr0mtib.cc:1822
std::string print_pages_in_level(const size_t level) const
Definition: btr0mtib.cc:1905
void print_tree_pages() const
Definition: btr0mtib.cc:2048
Level_ctx * get_level(size_t level) const
Get the level context object.
Definition: btr0mtib.h:1725
dberr_t close_blob(Blob_context blob_ctx, lob::ref_t &ref)
Indicate that the blob has been completed, so that resources can be removed, and as necessary flushin...
Definition: btr0mtib.h:928
const page_size_t m_page_size
Definition: btr0mtib.h:1259
std::vector< Level_ctx *, ut::allocator< Level_ctx * > > Level_ctxs
Definition: btr0mtib.h:934
buf_block_t * block_get(page_no_t page_no) const noexcept
Search for a BUF_BLOCK_MEMORY block with given page number in the local cache.
std::vector< page_no_t, ut::allocator< page_no_t > > m_last_page_nos
Last page numbers of each level.
Definition: btr0mtib.h:1134
size_t m_stat_n_extents
Number of extents allocated for this B-tree.
Definition: btr0mtib.h:1161
dberr_t write_blob(Blob_context blob_ctx, lob::ref_t &ref, const byte *data, size_t len)
Write data into the blob.
Definition: btr0mtib.h:918
Definition: btr0mtib.h:479
~Bulk_extent_allocator()
Destructor to ensure thread stop.
Definition: btr0mtib.h:489
Extent_cache m_leaf_extents
Cached leaf extents.
Definition: btr0mtib.h:625
dict_table_t * m_table
Innodb dictionary table object.
Definition: btr0mtib.h:646
std::chrono::microseconds m_allocator_wait_time
Total allocator wait time in micro seconds.
Definition: btr0mtib.h:619
size_t m_allocator_wait_count
Number of times allocator had to wait.
Definition: btr0mtib.h:613
size_t m_concurrency
Number of concurrent consumers.
Definition: btr0mtib.h:653
uint64_t init(dict_table_t *table, dict_index_t *index, trx_t *trx, size_t size, size_t num_threads, bool in_pages)
Check size and set extent allocator size parameters.
Definition: btr0mtib.cc:2198
std::condition_variable m_consumer_condition
Condition variable for extent consumer threads.
Definition: btr0mtib.h:637
bool check(size_t &n_leaf, size_t &n_non_leaf, bool &trigger)
Check if leaf and non-leaf extent cache needs to be filled.
Definition: btr0mtib.cc:2472
std::thread m_thread
Bulk extent allocator.
Definition: btr0mtib.h:607
size_t m_consumer_wait_count
Number of times consumer(s) had to wait.
Definition: btr0mtib.h:610
static constexpr size_t S_BULK_EXTEND_SIZE_MAX
Maximum size by which the tablespace is extended each time.
Definition: btr0mtib.h:524
dberr_t allocate(bool is_leaf, bool alloc_page, Page_range_t &range, std::function< void()> &fn_wait_begin, std::function< void()> &fn_wait_end)
Allocate a page range - currently ans Extent.
Definition: btr0mtib.cc:2356
dberr_t m_error
Error code, protected by m_mutex.
Definition: btr0mtib.h:643
static constexpr size_t S_MAX_RANGES
Upper bound for max ranges.
Definition: btr0mtib.h:521
dict_index_t * m_index
Definition: btr0mtib.h:647
trx_t * m_trx
Innodb transaction - used for checking interrupt.
Definition: btr0mtib.h:650
Extent_cache m_non_leaf_extents
Cached non-leaf extents.
Definition: btr0mtib.h:628
void allocator_wait() const
Allocator wait function.
Definition: btr0mtib.cc:2492
dberr_t allocate_extent(bool is_leaf, mtr_t &mtr, Page_range_t &range)
Allocate one extent.
Definition: btr0mtib.cc:2351
bool is_interrupted()
Definition: btr0mtib.cc:2302
dberr_t allocate_page(bool is_leaf, Page_range_t &range)
Allocate one page.
Definition: btr0mtib.cc:2306
dberr_t run()
Extent thread executor.
Definition: btr0mtib.cc:2559
void start()
Definition: btr0mtib.cc:2269
std::mutex m_mutex
This mutex protects the m_queue.
Definition: btr0mtib.h:631
Type m_type
Page range type.
Definition: btr0mtib.h:622
Type
Definition: btr0mtib.h:481
bool m_stop
Flag to indicate if the bulk allocator thread should stop.
Definition: btr0mtib.h:640
std::chrono::microseconds m_consumer_wait_time
Total consumer wait time in micro seconds.
Definition: btr0mtib.h:616
dberr_t allocate_extents(bool is_leaf, size_t num_extents)
Allocate extents and fill the cache.
Definition: btr0mtib.cc:2511
void stop()
Stop extent allocator thread, if active.
Definition: btr0mtib.cc:2277
std::condition_variable m_allocator_condition
Condition variable for allocator thread.
Definition: btr0mtib.h:634
Definition: btr0mtib.h:656
void do_work(fil_node_t *node, void *iov, size_t iov_size)
Do the actual work of flushing.
Definition: btr0mtib.cc:121
dberr_t check_and_notify() const
Check for flusher error and wake up flusher thread.
Definition: btr0mtib.cc:148
dberr_t m_error
Error code, protected by m_mutex.
Definition: btr0mtib.h:744
void add_to_free_queue(Page_extent *page_extent)
Definition: btr0mtib.cc:169
size_t m_pages_flushed
Number of pages flushed.
Definition: btr0mtib.h:770
space_id_t m_space_id
Bulk flusher is specific to a tablespace for now.
Definition: btr0mtib.h:773
std::atomic< bool > m_is_error
Set if error is encountered during flush.
Definition: btr0mtib.h:741
std::atomic< bool > m_stop
Flag to indicate if the bulk flusher thread should stop.
Definition: btr0mtib.h:738
dberr_t get_error() const
Definition: btr0mtib.cc:83
std::vector< Page_extent * > m_free_queue
This queue is protected by the m_free_mutex.
Definition: btr0mtib.h:730
dberr_t run()
Thread main function.
Definition: btr0mtib.cc:220
size_t m_id
Flusher ID.
Definition: btr0mtib.h:776
bool is_error() const
Definition: btr0mtib.h:694
Page_extent * get_free_extent()
Definition: btr0mtib.cc:159
std::mutex m_free_mutex
This mutex protects the m_free_queue.
Definition: btr0mtib.h:733
bool should_i_stop() const
Check if the bulk flush thread should stop working.
Definition: btr0mtib.h:711
size_t m_n_sleep
Number of times slept.
Definition: btr0mtib.h:758
std::mutex m_mutex
This mutex protects the m_queue.
Definition: btr0mtib.h:723
~Bulk_flusher()
Destructor.
Definition: btr0mtib.cc:97
void set_error(dberr_t error_code)
Set error code.
Definition: btr0mtib.cc:88
std::vector< Page_extent * > m_queue
This queue is protected by the m_mutex.
Definition: btr0mtib.h:720
std::thread m_flush_thread
Bulk flusher thread.
Definition: btr0mtib.h:755
size_t m_max_queue_size
Maximum queue size, defaults to 4.
Definition: btr0mtib.h:767
void start(space_id_t space_id, size_t index, size_t queue_size)
Start a new thread to do the flush work.
Definition: btr0mtib.cc:69
std::chrono::microseconds m_wait_time
Total sleep time in micro seconds.
Definition: btr0mtib.h:761
bool is_work_available()
Check if work is available for the bulk flusher thread.
Definition: btr0mtib.cc:204
std::vector< Page_extent * > m_priv_queue
Private queue (private to the bulk flush thread) containing the extents to flush.
Definition: btr0mtib.h:752
void wait_to_stop()
Wait till the bulk flush thread stops.
Definition: btr0mtib.cc:108
std::vector< page_no_t > m_flushed_page_nos
Vector of page numbers that are flushed by this Bulk_flusher object.
Definition: btr0mtib.h:781
size_t get_max_queue_size() const
Get the maximum allowed queue size.
Definition: btr0mtib.h:688
std::condition_variable m_condition
Condition variable to wait upon.
Definition: btr0mtib.h:726
void info()
Print useful information to the server log file while exiting.
Definition: btr0mtib.cc:2183
void wait()
When no work is available, put the thread to sleep.
Definition: btr0mtib.cc:281
void add(Page_extent *page_extent, std::function< void()> &fn_wait_begin, std::function< void()> &fn_wait_end)
Add a page extent to the bulk flush queue.
Definition: btr0mtib.cc:174
static constexpr std::chrono::milliseconds s_sleep_duration
The sleep duration in milliseconds.
Definition: btr0mtib.h:764
The proper function call sequence of Page_load is as below: – Page_load::init – Page_load::insert – P...
Definition: btr0mtib.h:1361
dberr_t init_mem(const page_no_t new_page_no, Page_extent *page_extent) noexcept
Definition: btr0mtib.cc:997
dberr_t store_ext(const big_rec_t *big_rec, Rec_offsets offsets) noexcept
Store external record Since the record is not logged yet, so we don't log update to the record.
void set_level(size_t level) noexcept
Set the level of this page.
Definition: btr0mtib.h:1562
buf_block_t * get_block()
Definition: btr0mtib.h:1628
space_id_t space() const noexcept
Get the table space ID.
Definition: btr0mtib.h:1718
void rollback() noexcept
Commit mtr for a page.
Definition: btr0mtib.cc:1423
dberr_t init_mem_blob(const page_no_t page_no, Page_extent *page_extent) noexcept
Initialize a memory block to be used for storing blobs.
Definition: btr0mtib.cc:960
bool is_corrupted() const
Check if page is corrupted.
Definition: btr0mtib.cc:326
trx_id_t m_trx_id
The transaction id.
Definition: btr0mtib.h:1651
byte * m_heap_top
The heap top in page for next insert.
Definition: btr0mtib.h:1672
size_t get_level() const noexcept
Get page level.
Definition: btr0mtib.h:1559
rec_t * m_last_slotted_rec
Last record assigned to a slot.
Definition: btr0mtib.h:1694
bool make_ext(dtuple_t *tuple)
Store externally the first possible field of the given tuple.
Definition: btr0mtib.cc:1547
dict_index_t * index()
Definition: btr0mtib.h:1626
void set_level_ctx(Level_ctx *level_ctx)
Set the level context object for this page load.
Definition: btr0mtib.h:1616
void set_trx_id(const trx_id_t trx_id)
Set the transaction id.
Definition: btr0mtib.h:1415
size_t copy_to(std::vector< Page_load * > &to_pages)
Distribute all records from this page to the given pages.
Definition: btr0mtib.cc:1461
size_t copy_records(const rec_t *first_rec) noexcept
Copy given and all following records.
Definition: btr0mtib.cc:1492
void set_flush_observer(Flush_observer *observer)
Set the flush observer.
Definition: btr0mtib.h:1423
void set_page_extent(Page_extent *page_extent)
Definition: btr0mtib.h:1630
size_t get_rec_no() const
Get record no.
Definition: btr0mtib.h:1565
trx_id_t get_trx_id() const
Get the current transaction identifier.
Definition: btr0mtib.h:1419
void set_min_rec_flag()
Set the REC_INFO_MIN_REC_FLAG on the first user record in this page.
Definition: btr0mtib.cc:2058
page_no_t get_page_no() const noexcept
Get the page number of this page load object.
Definition: btr0mtib.h:1537
void set_next(page_no_t next_page_no) noexcept
Set next page.
Definition: btr0mtib.cc:1511
size_t get_page_size() const noexcept
Get the physical page size of the underlying tablespace.
Definition: btr0mtib.h:1720
dict_index_t * m_index
The index B-tree.
Definition: btr0mtib.h:1645
size_t m_slotted_rec_no
Number of records assigned to slots.
Definition: btr0mtib.h:1697
dberr_t insert(const dtuple_t *tuple, const big_rec_t *big_rec, size_t rec_size) noexcept
Insert a tuple in the page.
Definition: btr0mtib.cc:1302
Flush_observer * m_flush_observer
Flush observer.
Definition: btr0mtib.h:1691
bool verify_space_id() const
Obtain tablespace id from the frame and the buffer block and ensure that they are the same.
Definition: btr0mtib.cc:3334
void free()
Free the memory block.
Definition: btr0mtib.cc:2106
uint64_t m_modify_clock
The modify clock value of the buffer block when the block is re-pinned.
Definition: btr0mtib.h:1688
void set_prev(page_no_t prev_page_no) noexcept
Set previous page.
Definition: btr0mtib.cc:1515
bool has_level_ctx() const
Check if this page load object contains a level context object.
Definition: btr0mtib.h:1621
Page_load(dict_index_t *index, Btree_load *btree_load)
Ctor.
Definition: btr0mtib.cc:928
size_t m_rec_no
User record no.
Definition: btr0mtib.h:1675
std::atomic_bool m_is_cached
true iff the the Page load is cached.
Definition: btr0mtib.h:1709
static void drop(Page_load *page_load)
Release the page loader.
Definition: btr0mtib.cc:709
bool is_memory() const
Check if state of this page is BUF_BLOCK_MEMORY.
Definition: btr0mtib.h:1382
bool is_leaf() const
Definition: btr0mtib.h:1427
bool is_space_available(size_t rec_size) const noexcept
Check if required space is available in the page for the rec to be inserted.
Definition: btr0mtib.cc:1523
void reset() noexcept
Reset this object so that Page_load::init() can be called again on this object.
Definition: btr0mtib.cc:1137
size_t get_data_size() const
Definition: btr0mtib.h:1578
bool need_ext(const dtuple_t *tuple, size_t rec_size) const noexcept
Check whether the record needs to be stored externally.
Definition: btr0mtib.cc:1565
size_t m_level
The page level in B-tree.
Definition: btr0mtib.h:1666
rec_t * m_cur_rec
The current rec, just before the next insert rec.
Definition: btr0mtib.h:1660
dberr_t alloc() noexcept
Allocate a page for this Page_load object.
Definition: btr0mtib.cc:1090
void set_page_no(const page_no_t page_no)
Set the page number of this object.
Definition: btr0mtib.cc:919
size_t m_reserved_space
The reserved space for fill factor.
Definition: btr0mtib.h:1681
mem_heap_t * m_heap
Memory heap for internal allocation.
Definition: btr0mtib.h:1642
static Page_load * create(Btree_load *btree_load, Page_extent *page_extent)
A static member function to create this object.
Definition: btr0mtib.cc:699
page_id_t get_page_id() const noexcept
Definition: btr0mtib.h:1539
void latch() noexcept
Start mtr and latch block.
Page_extent * m_page_extent
Definition: btr0mtib.h:1706
bool is_cached() const
Definition: btr0mtib.h:1638
page_t * get_page() noexcept
Definition: btr0mtib.h:1572
bool is_index_locked() noexcept
Check if index is X locked.
Definition: btr0mtib.cc:1572
Page_load(dict_index_t *index, trx_id_t trx_id, page_no_t page_no, size_t level, Flush_observer *observer, Btree_load *btree_load=nullptr) noexcept
Constructor.
Definition: btr0mtib.h:1400
void set_top_seg(const fseg_header_t *hdr)
Definition: btr0mtib.h:1435
dberr_t reinit() noexcept
Re-initialize this page.
Definition: btr0mtib.cc:1069
~Page_load() noexcept
Destructor.
Definition: btr0mtib.cc:3318
void set_leaf_seg(const fseg_header_t *hdr)
Definition: btr0mtib.h:1432
bool is_min_rec_flag() const
Definition: btr0mtib.cc:2060
dberr_t commit() noexcept
Commit mtr for a page.
Definition: btr0mtib.cc:1393
size_t copy_all(const page_t *src_page) noexcept
Copy all records from page.
Definition: btr0mtib.cc:1452
size_t m_free_space
The free space left in the page.
Definition: btr0mtib.h:1678
void set_cached()
Mark the Page load as cached.
Definition: btr0mtib.h:1635
const bool m_is_comp
Flag: is page in compact format.
Definition: btr0mtib.h:1669
Btree_load * m_btree_load
Definition: btr0mtib.h:1702
page_no_t get_prev() noexcept
Get previous page (FIL_PAGE_PREV).
Definition: btr0mtib.cc:1519
mtr_t * m_mtr
The min-transaction.
Definition: btr0mtib.h:1648
const page_t * get_page() const noexcept
Get page.
Definition: btr0mtib.h:1568
bool m_modified
Page modified flag.
Definition: btr0mtib.h:1700
buf_block_t * m_block
The buffer block.
Definition: btr0mtib.h:1654
void print_child_page_nos() noexcept
Print the child page numbers.
Definition: btr0mtib.cc:1436
page_no_t m_page_no
The page no.
Definition: btr0mtib.h:1663
Level_ctx * m_level_ctx
Definition: btr0mtib.h:1704
dberr_t init() noexcept
Initialize members and allocate page if needed and start mtr.
Definition: btr0mtib.cc:1169
size_t m_total_data
Total data in the page.
Definition: btr0mtib.h:1684
void init_for_writing()
Definition: btr0mtib.cc:341
void finish() noexcept
Mark end of insertion to the page.
Definition: btr0mtib.cc:1334
page_t * m_page
The page.
Definition: btr0mtib.h:1657
dtuple_t * get_node_ptr() noexcept
Get node pointer.
Definition: btr0mtib.h:1714
Used to insert many blobs into InnoDB.
Definition: btr0mtib.h:790
Page_load * alloc_data_page()
Allocate a data page.
Definition: btr0mtib.cc:3910
Page_range_t m_page_range_first
Definition: btr0mtib.h:867
Page_load * alloc_first_page()
Allocate a LOB first page.
Definition: btr0mtib.cc:3890
Page_load * alloc_index_page()
Allocate a LOB index page.
Definition: btr0mtib.cc:3894
ut::Object_cache< Page_load > m_page_load_cache
Cache of Page_load objects.
Definition: btr0mtib.h:881
Page_extent * alloc_free_extent()
Definition: btr0mtib.cc:3849
Blob_inserter(Btree_load &btree_load)
Constructor.
Definition: btr0mtib.cc:3690
Page_load * alloc_page_from_extent(Page_extent *&m_page_extent)
Definition: btr0mtib.cc:3858
trx_id_t get_trx_id() const
Get the current transaction id.
Definition: btr0mtib.h:1813
~Blob_inserter()
Definition: btr0mtib.cc:3937
Page_extent * m_page_extent_first
Page extent from which to allocate first pages of blobs.
Definition: btr0mtib.h:865
dberr_t write_blob(Blob_context blob_ctx, lob::ref_t &ref, const byte *data, size_t len)
Write data into the blob.
Definition: btr0mtib.cc:3815
ut::Object_cache< Page_extent > m_page_extent_cache
Cache of Page_extent objects.
Definition: btr0mtib.h:884
dberr_t close_blob(Blob_context blob_ctx, lob::ref_t &ref)
Indicate that the blob has been completed, so that resources can be removed, and as necessary flushin...
Definition: btr0mtib.cc:3821
void finish()
Definition: btr0mtib.cc:3918
std::list< Page_extent * > m_index_extents
Page extent from which to allocate index pages of blobs.
Definition: btr0mtib.h:875
Blob_context m_blob
The current blob being inserted.
Definition: btr0mtib.h:878
dberr_t insert_blob(lob::ref_t &ref, const dfield_t *dfield)
Definition: btr0mtib.h:804
dberr_t open_blob(Blob_context &blob_ctx, lob::ref_t &ref)
Create a blob.
Definition: btr0mtib.cc:3810
dberr_t init()
Initialize by allocating necessary resources.
Definition: btr0mtib.cc:3697
Btree_load & m_btree_load
Definition: btr0mtib.h:861
ut::unique_ptr< Blob_handle > m_blob_handle
Only one blob handle per sub-tree.
Definition: btr0mtib.h:887
Page_extent * m_page_extent_data
Page extent from which to allocate data pages of blobs.
Definition: btr0mtib.h:871
We use Flush_observer to track flushing of non-redo logged pages in bulk create index(btr0load....
Definition: buf0flu.h:258
The proper function call sequence of Page_load is as below: – Page_load::init – Page_load::insert – P...
Definition: btr0load.cc:54
A helper RAII wrapper for otherwise difficult to use sequence of:
Definition: rem0rec.h:292
page_id_t id
Page id.
Definition: buf0buf.h:1387
Page identifier.
Definition: buf0types.h:207
Page size descriptor.
Definition: page0size.h:50
size_t physical() const
Retrieve the physical page size (on-disk).
Definition: page0size.h:121
A utility class which, if inherited from, prevents the descendant class from being copied,...
Definition: ut0class_life_cycle.h:41
A class to manage objects of type T.
Definition: ut0object_cache.h:40
const char * p
Definition: ctype-mb.cc:1227
dberr_t
Definition: db0err.h:39
@ DB_SUCCESS
Definition: db0err.h:43
DDL key comparison.
Data dictionary system.
static bool dict_table_is_comp(const dict_table_t *table)
Check whether the table uses the compact page format.
static ulint dict_index_is_spatial(const dict_index_t *index)
Check whether the index is a Spatial Index.
constexpr page_no_t FIL_NULL
'null' (undefined) page offset in the context of file spaces
Definition: fil0fil.h:1161
#define FSP_EXTENT_SIZE
File space extent size in pages page size | file space extent size -------—+--------------------— 4 K...
Definition: fsp0types.h:64
constexpr uint32_t FSEG_HEADER_SIZE
Length of the file system header, in bytes.
Definition: fsp0types.h:94
byte fseg_header_t
Data type for file segment header.
Definition: fsp0types.h:85
#define free(A)
Definition: lexyy.cc:915
For bulk loading large objects.
Implements the large objects (LOB) module.
Definition: btr0mtib.cc:56
void * Blob_context
Definition: btr0mtib.h:60
void bulk_load_enable_slow_io_debug()
Definition: btr0mtib.cc:60
void bulk_load_disable_slow_io_debug()
Definition: btr0mtib.cc:61
std::ostream & operator<<(std::ostream &out, const Page_extent &obj)
Definition: btr0mtib.h:321
static PFS_engine_table_share_proxy table
Definition: pfs.cc:61
Used for bulk load of data.
Definition: fut0lst.cc:411
PT & ref(PT *tp)
Definition: tablespace_impl.cc:359
bool index(const std::string &value, const String &search_for, uint32_t *idx)
Definition: contains.h:75
static Value err()
Create a Value object that represents an error condition.
Definition: json_binary.cc:905
std::chrono::milliseconds milliseconds
Definition: authorize_manager.cc:68
const char * begin(const char *const c)
Definition: base64.h:44
size_t size(const char *const c)
Definition: base64.h:46
Cursor end()
A past-the-end Cursor.
Definition: rules_table_service.cc:192
Definition: gcs_xcom_synode.h:64
std::vector< T, ut::allocator< T > > vector
Specialization of vector which uses allocator.
Definition: ut0new.h:2876
void delete_(T *ptr) noexcept
Releases storage which has been dynamically allocated through any of the ut::new*() variants.
Definition: ut0new.h:811
std::conditional_t< !std::is_array< T >::value, std::unique_ptr< T, detail::Deleter< T > >, std::conditional_t< detail::is_unbounded_array_v< T >, std::unique_ptr< T, detail::Array_deleter< std::remove_extent_t< T > > >, void > > unique_ptr
The following is a common type that is returned by all the ut::make_unique (non-aligned) specializati...
Definition: ut0new.h:2440
The page cursor.
static ulint page_get_free_space_of_empty(bool comp)
Calculates free space if a page is emptied.
static ulint page_get_data_size(const page_t *page)
Returns the sum of the sizes of the records in the record list excluding the infimum and supremum rec...
constexpr uint32_t PAGE_HEADER
index page header starts at this offset
Definition: page0types.h:53
constexpr uint32_t PAGE_BTR_SEG_LEAF
file segment header for the leaf pages in a B-tree: defined only on the root page of a B-tree,...
Definition: page0types.h:90
constexpr uint32_t PAGE_BTR_SEG_TOP
Definition: page0types.h:98
byte page_t
Type of the index page.
Definition: page0types.h:152
byte rec_t
Definition: rem0types.h:41
Function object to compare two Btree_load objects.
Definition: btr0mtib.h:1795
dict_index_t * m_index
Definition: btr0mtib.h:1798
bool operator()(const Btree_load *l_btree, const Btree_load *r_btree)
Definition: btr0mtib.cc:2125
Btree_load_compare(dict_index_t *index)
Definition: btr0mtib.h:1796
bool is_empty() const
Definition: btr0mtib.h:532
size_t m_max_range
Maximum number of ranges to pre-allocate.
Definition: btr0mtib.h:560
void init(size_t max_range)
Initialize cache.
Definition: btr0mtib.cc:2190
bool check(size_t &num_alloc, size_t &num_free) const
Check for number of extents to be allocated and cached.
Definition: btr0mtib.cc:2453
std::array< Page_range_t, S_MAX_RANGES > m_ranges
Cached page ranges already allocated to the segment.
Definition: btr0mtib.h:557
std::atomic< size_t > m_num_consumed
Total number of ranges allocated.
Definition: btr0mtib.h:566
void set_range(size_t index, Page_range_t &range)
Set allocated range(extent) in cache.
Definition: btr0mtib.cc:2444
std::atomic< size_t > m_num_allocated
Total number of ranges allocated.
Definition: btr0mtib.h:563
bool is_full() const
Definition: btr0mtib.h:535
bool get_range(Page_range_t &range, bool &alloc_trigger)
Get one page range from the cache.
Definition: btr0mtib.cc:2424
Context information for each level.
Definition: btr0mtib.h:358
size_t m_stat_n_extents
Number of extents allocated at this level.
Definition: btr0mtib.h:462
buf_block_t * alloc(const page_no_t new_page_no) noexcept
Allocate private memory buffer (BUF_BLOCK_MEMORY) block for given page number.
Definition: btr0mtib.cc:889
dict_index_t * m_index
The index which is being built.
Definition: btr0mtib.h:447
void build_page_cache()
Build page loader cache for current exent.
Definition: btr0mtib.cc:802
bool load_extent_from_cache()
Load one extent from extent cache.
Definition: btr0mtib.cc:749
dberr_t init()
Initialize.
Definition: btr0mtib.cc:842
Btree_load * m_btree_load
A back pointer to conceptually higher level btree load object.
Definition: btr0mtib.h:456
void set_current_page_load(Page_load *sibling)
Definition: btr0mtib.h:475
page_no_t m_last_page
The page_no of the last page in this level.
Definition: btr0mtib.h:444
~Level_ctx()
Destructor.
Definition: btr0mtib.cc:3316
const size_t m_level
The B-tree level whose context information is stored in this obj.
Definition: btr0mtib.h:450
void build_extent_cache()
Build the extent cache.
Definition: btr0mtib.cc:816
void free_page_load()
Free the current page load.
Definition: btr0mtib.cc:730
Page_load * create_page_load()
Definition: btr0mtib.cc:716
std::vector< page_no_t > m_pages_allocated
Definition: btr0mtib.h:469
trx_id_t get_trx_id() const
Definition: btr0mtib.h:1754
static void destroy(Level_ctx *obj)
Static member function to destroy a Level_ctx object.
Definition: btr0mtib.cc:685
std::vector< Page_extent * > m_cached_extents
Pre allocated extents to prevent repeated allocation and free.
Definition: btr0mtib.h:438
dberr_t alloc_page_num(page_no_t &page_no)
Allocate a page number.
Definition: btr0mtib.cc:618
Level_ctx(dict_index_t *index, size_t level, Btree_load *btree_load)
Constructor.
Definition: btr0mtib.h:377
bool m_extent_full
True if the current extent is full.
Definition: btr0mtib.h:465
dberr_t alloc_extent()
Allocate one extent in the relevant file segment.
Definition: btr0mtib.cc:655
size_t m_stat_n_pages
Number of pages allocated at this level.
Definition: btr0mtib.h:459
bool is_page_tracked(const page_no_t &page_no) const
Definition: btr0mtib.cc:649
Page_load * m_page_load
The Page_load of the current page being loaded.
Definition: btr0mtib.h:453
Page_load * get_page_load_from_cache()
Get a free page loader from cache.
Definition: btr0mtib.cc:784
Page_extent * m_page_extent
The current extent that is being loaded.
Definition: btr0mtib.h:421
page_no_t m_first_page
The page_no of the first page in this level.
Definition: btr0mtib.h:441
Page_load * get_page_load() const
Definition: btr0mtib.h:473
bool is_leaf() const
Check if this is leaf level.
Definition: btr0mtib.h:392
static Level_ctx * create(dict_index_t *index, size_t level, Btree_load *btree_load)
Static member function construct a Level_ctx object.
Definition: btr0mtib.cc:677
Allocate, use, manage and flush one extent pages (FSP_EXTENT_SIZE).
Definition: btr0mtib.h:67
Page_extent(Btree_load *btree_load, const bool is_leaf)
Constructor.
Definition: btr0mtib.h:1762
static void drop(Page_extent *extent)
Release the page extent.
Definition: btr0mtib.h:1781
bool is_blob() const
Check if this is a blob extent.
Definition: btr0mtib.h:231
void set_cached()
Mark the extent as cached.
Definition: btr0mtib.h:208
size_t m_next_cached_page_load_index
Next cached page load index.
Definition: btr0mtib.h:254
void get_page_numbers(std::vector< page_no_t > &page_numbers) const
Definition: btr0mtib.h:262
std::atomic_bool m_is_free
true if the cached entry is free to be used.
Definition: btr0mtib.h:250
std::vector< Page_load * > m_page_loads
All the page loaders of the used pages.
Definition: btr0mtib.h:87
bool is_null() const
Definition: btr0mtib.h:121
bool is_valid() const
Check if the range is valid.
Definition: btr0mtib.h:302
bool m_is_blob
True if this extent is used for blobs.
Definition: btr0mtib.h:257
dberr_t flush(fil_node_t *node, void *iov, size_t iov_size)
Flush the used pages to disk.
Definition: btr0mtib.cc:537
bool is_any_used() const
Check if there are any pages used.
Definition: btr0mtib.h:153
page_no_t alloc()
Allocate a page number.
Definition: btr0mtib.h:334
void init()
Initialize the next page number to be allocated.
Definition: btr0mtib.h:344
std::ostream & print(std::ostream &out) const
Definition: btr0mtib.h:313
dberr_t destroy()
Free all resources.
Definition: btr0mtib.cc:608
std::pair< page_no_t, page_no_t > Page_range_t
Definition: btr0mtib.h:68
std::vector< Page_load * > m_cached_page_loads
Cached page loads.
Definition: btr0mtib.h:252
size_t used_pages() const
Calculate the number of used pages.
Definition: btr0mtib.h:109
void set_page_load(page_no_t page_no, Page_load *page_load)
Member of Page_extent.
Definition: btr0mtib.h:269
page_no_t page_count() const
Number of pages in this extent.
Definition: btr0mtib.h:353
dberr_t flush_one_by_one(fil_node_t *node)
Flush one page at a time.
Definition: btr0mtib.cc:421
dberr_t bulk_flush(fil_node_t *node, void *iov, size_t iov_size)
Flush 1 extent pages at a time.
Definition: btr0mtib.cc:522
page_no_t m_page_no
Next page number to be used.
Definition: btr0mtib.h:80
void set_blob()
Mark that this extent is used for blobs.
Definition: btr0mtib.h:227
void reset_range(const Page_range_t &range)
Reset the range with the given value.
Definition: btr0mtib.h:325
std::atomic_bool m_is_owned_by_bulk_flusher
True if this extent has been handed over to the bulk flusher.
Definition: btr0mtib.h:238
Page_range_t m_range
Page numbers of the pages that has been allocated in this extent.
Definition: btr0mtib.h:84
bool is_fully_used() const
Check if no more pages are there to be used.
Definition: btr0mtib.h:148
bool is_free() const
Definition: btr0mtib.h:215
void destroy_cached()
Free any cached page load entries.
Definition: btr0mtib.cc:600
void append(Page_load *page_load)
Save a page_load.
Definition: btr0mtib.h:1739
Page_range_t pages_to_free() const
size_t last() const
Get the index of the first unused page load.
Definition: btr0mtib.h:115
void set_state(bool free)
Set and unset free state of a cached extent.
Definition: btr0mtib.h:212
std::atomic_bool m_is_cached
true iff the the extent is cached.
Definition: btr0mtib.h:248
void free_memory_blocks()
Free the BUF_BLOCK_MEMORY blocks used by this extent.
Definition: btr0mtib.h:1806
bool is_btree_load_nullptr() const
Definition: btr0mtib.h:89
bool is_cached() const
Definition: btr0mtib.h:218
void reset_cached_page_loads()
Reset page load cache to free all.
Definition: btr0mtib.h:221
space_id_t space() const
Definition: btr0mtib.h:1758
static Page_extent * create(Btree_load *btree_load, const bool is_leaf, const bool is_blob)
Create an object of type Page_extent in the heap.
Definition: btr0mtib.h:1770
Page_load * get_page_load(page_no_t page_no)
Member of Page_extent.
Definition: btr0mtib.h:284
~Page_extent()
Destructor.
Definition: btr0mtib.h:294
Btree_load * m_btree_load
Definition: btr0mtib.h:242
bool m_is_leaf
true if this extent belongs to leaf segment.
Definition: btr0mtib.h:245
Information about a buffer page.
Definition: btr0mtib.h:1731
size_t m_n_recs
Number of user records in the page.
Definition: btr0mtib.h:1733
size_t m_data_size
Number of bytes of data.
Definition: btr0mtib.h:1736
Heap wrapper that destroys the heap instance when it goes out of scope.
Definition: mem0mem.h:439
Storage format for overflow data in a big record, that is, a clustered index record which needs exter...
Definition: data0data.h:854
The buffer control block structure.
Definition: buf0buf.h:1764
buf_page_t page
page information; this must be the first field, so that buf_pool->page_hash can point to buf_page_t o...
Definition: buf0buf.h:1770
bool is_memory() const noexcept
Definition: buf0buf.h:2011
Compare the keys of an index.
Definition: ddl0impl-compare.h:41
Structure for an SQL data field.
Definition: data0data.h:617
unsigned len
data length; UNIV_SQL_NULL if SQL null
Definition: data0data.h:623
void * data
pointer to data
Definition: data0data.h:618
Data structure for an index.
Definition: dict0mem.h:1041
unsigned space
space where the index tree is placed
Definition: dict0mem.h:1058
bool is_clustered() const
Definition: dict0mem.h:1306
dict_table_t * table
back pointer to table
Definition: dict0mem.h:1055
page_size_t get_page_size() const
Get the page size of the tablespace to which this index belongs.
Definition: dict0mem.cc:917
Data structure for a database table.
Definition: dict0mem.h:1913
table_name_t name
Table name.
Definition: dict0mem.h:1988
Structure for an SQL data tuple of fields (logical record)
Definition: data0data.h:696
File node of a tablespace or the log data space.
Definition: fil0fil.h:160
Tablespace or log data space.
Definition: fil0fil.h:240
The struct 'lob::ref_t' represents an external field reference.
Definition: lob0lob.h:198
The info structure stored at the beginning of a heap block.
Definition: mem0mem.h:302
Mini-transaction handle and buffer.
Definition: mtr0mtr.h:177
Definition: gen_lex_token.cc:149
char * m_name
The name in internal representation.
Definition: dict0mem.h:467
Definition: trx0trx.h:675
ib_id_t trx_id_t
Transaction identifier (DB_TRX_ID, DATA_TRX_ID)
Definition: trx0types.h:138
#define IF_DEBUG(...)
Definition: univ.i:674
unsigned long int ulint
Definition: univ.i:406
Utilities related to class lifecycle.
#define UT_LOCATION_HERE
Definition: ut0core.h:73
#define ut_ad(EXPR)
Debug assertion.
Definition: ut0dbg.h:105
#define ut_a(EXPR)
Abort execution if EXPR does not evaluate to nonzero.
Definition: ut0dbg.h:93
Dynamic memory allocation routines and custom allocators specifically crafted to support memory instr...
#define UT_NEW_THIS_FILE_PSI_KEY
Definition: ut0new.h:566
Manage a cache of objects.