MySQL 8.0.31
Source Code Documentation
buf0buf.h
Go to the documentation of this file.
1/*****************************************************************************
2
3Copyright (c) 1995, 2022, Oracle and/or its affiliates.
4
5This program is free software; you can redistribute it and/or modify it under
6the terms of the GNU General Public License, version 2.0, as published by the
7Free Software Foundation.
8
9This program is also distributed with certain software (including but not
10limited to OpenSSL) that is licensed under separate terms, as designated in a
11particular file or component or in included license documentation. The authors
12of MySQL hereby grant you an additional permission to link the program and
13your derivative works with the separately licensed software that they have
14included with MySQL.
15
16This program is distributed in the hope that it will be useful, but WITHOUT
17ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
19for more details.
20
21You should have received a copy of the GNU General Public License along with
22this program; if not, write to the Free Software Foundation, Inc.,
2351 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24
25*****************************************************************************/
26
27/** @file include/buf0buf.h
28 The database buffer pool high-level routines
29
30 Created 11/5/1995 Heikki Tuuri
31 *******************************************************/
32
33#ifndef buf0buf_h
34#define buf0buf_h
35
36#include "buf0types.h"
37#include "fil0fil.h"
38#include "hash0hash.h"
39#include "mtr0types.h"
40#include "os0proc.h"
41#include "page0types.h"
42#include "srv0shutdown.h"
43#include "srv0srv.h"
44#include "univ.i"
45#include "ut0byte.h"
46#include "ut0rbt.h"
47
48#include "buf/buf.h"
49
50#include <ostream>
51
52// Forward declaration
53struct fil_addr_t;
54
/** @name Modes for buf_page_get_gen */
/** @{ */
enum class Page_fetch {
  /** Get always */
  NORMAL,

  /** Same as NORMAL, but hint that the fetch is part of a large scan.
  Try not to flood the buffer pool with pages that may not be accessed again
  any time soon. */
  SCAN,

  /** get if in pool */
  IF_IN_POOL,

  /** get if in pool, do not make the block young in the LRU list */
  PEEK_IF_IN_POOL,

  /** get and bufferfix, but set no latch; we have separated this case, because
  it is error-prone programming not to set a latch, and it should be used with
  care */
  NO_LATCH,

  /** Get the page only if it's in the buffer pool, if not then set a watch on
  the page. */
  IF_IN_POOL_OR_WATCH,

  /** Like Page_fetch::NORMAL, but do not mind if the file page has been
  freed. */
  POSSIBLY_FREED
};
/** @} */
86
/** @name Modes for buf_page_get_known_nowait */

/** @{ */
enum class Cache_hint {
  /** Move the block to the start of the LRU list if there is a danger that the
  block would drift out of the buffer pool*/
  /* NOTE(review): the explicit values 51/52 look like carry-overs from the old
  BUF_MAKE_YOUNG/BUF_KEEP_OLD defines — confirm before changing them. */
  MAKE_YOUNG = 51,

  /** Preserve the current LRU position of the block. */
  KEEP_OLD = 52
};

/** @} */
100
/** Number of bits to representing a buffer pool ID */
constexpr ulint MAX_BUFFER_POOLS_BITS = 6;

/** The maximum number of buffer pools that can be defined */
constexpr ulint MAX_BUFFER_POOLS = (1 << MAX_BUFFER_POOLS_BITS);
107/** Maximum number of concurrent buffer pool watches */
108#define BUF_POOL_WATCH_SIZE (srv_n_purge_threads + 1)
109
110/** The maximum number of page_hash locks */
111constexpr ulint MAX_PAGE_HASH_LOCKS = 1024;
112
/** The buffer pools of the database */
extern buf_pool_t *buf_pool_ptr;
116#ifdef UNIV_HOTBACKUP
117/** first block, for --apply-log */
118extern buf_block_t *back_block1;
119/** second block, for page reorganize */
120extern buf_block_t *back_block2;
121#endif /* UNIV_HOTBACKUP */
122
/** @brief States of a control block
@see buf_page_t

The enumeration values must be 0..7. */
enum buf_page_state : uint8_t {
  /** A sentinel for the buffer pool watch, element of buf_pool->watch[] */
  BUF_BLOCK_POOL_WATCH,
  /** Contains a clean compressed page */
  BUF_BLOCK_ZIP_PAGE,
  /** Contains a compressed page that is in the buf_pool->flush_list */
  BUF_BLOCK_ZIP_DIRTY,

  /** Is in the free list; must be after the BUF_BLOCK_ZIP_ constants for
  compressed-only pages @see buf_block_state_valid() */
  BUF_BLOCK_NOT_USED,

  /** When buf_LRU_get_free_block returns a block, it is in this state */
  BUF_BLOCK_READY_FOR_USE,

  /** Contains a buffered file page */
  BUF_BLOCK_FILE_PAGE,

  /** Contains some main memory object */
  BUF_BLOCK_MEMORY,

  /** Hash index should be removed before putting to the free list */
  BUF_BLOCK_REMOVE_HASH
};
152/** This structure defines information we will fetch from each buffer pool. It
153will be used to print table IO stats */
155 /* General buffer pool info */
156 ulint pool_unique_id; /*!< Buffer Pool ID */
157 ulint pool_size; /*!< Buffer Pool size in pages */
158 ulint lru_len; /*!< Length of buf_pool->LRU */
159 ulint old_lru_len; /*!< buf_pool->LRU_old_len */
160 ulint free_list_len; /*!< Length of buf_pool->free list */
161 ulint flush_list_len; /*!< Length of buf_pool->flush_list */
162 ulint n_pend_unzip; /*!< buf_pool->n_pend_unzip, pages
163 pending decompress */
164 ulint n_pend_reads; /*!< buf_pool->n_pend_reads, pages
165 pending read */
166 ulint n_pending_flush_lru; /*!< Pages pending flush in LRU */
167 ulint n_pending_flush_single_page; /*!< Pages pending to be
168 flushed as part of single page
169 flushes issued by various user
170 threads */
171 ulint n_pending_flush_list; /*!< Pages pending flush in FLUSH
172 LIST */
173 ulint n_pages_made_young; /*!< number of pages made young */
174 ulint n_pages_not_made_young; /*!< number of pages not made young */
175 ulint n_pages_read; /*!< buf_pool->n_pages_read */
176 ulint n_pages_created; /*!< buf_pool->n_pages_created */
177 ulint n_pages_written; /*!< buf_pool->n_pages_written */
178 ulint n_page_gets; /*!< buf_pool->n_page_gets */
179 ulint n_ra_pages_read_rnd; /*!< buf_pool->n_ra_pages_read_rnd,
180 number of pages readahead */
181 ulint n_ra_pages_read; /*!< buf_pool->n_ra_pages_read, number
182 of pages readahead */
183 ulint n_ra_pages_evicted; /*!< buf_pool->n_ra_pages_evicted,
184 number of readahead pages evicted
185 without access */
186 ulint n_page_get_delta; /*!< num of buffer pool page gets since
187 last printout */
188
189 /* Buffer pool access stats */
190 double page_made_young_rate; /*!< page made young rate in pages
191 per second */
192 double page_not_made_young_rate; /*!< page not made young rate
193 in pages per second */
194 double pages_read_rate; /*!< num of pages read per second */
195 double pages_created_rate; /*!< num of pages create per second */
196 double pages_written_rate; /*!< num of pages written per second */
197 ulint page_read_delta; /*!< num of pages read since last
198 printout */
199 ulint young_making_delta; /*!< num of pages made young since
200 last printout */
201 ulint not_young_making_delta; /*!< num of pages not make young since
202 last printout */
203
204 /* Statistics about read ahead algorithm. */
205 double pages_readahead_rnd_rate; /*!< random readahead rate in pages per
206 second */
207 double pages_readahead_rate; /*!< readahead rate in pages per
208 second */
209 double pages_evicted_rate; /*!< rate of readahead page evicted
210 without access, in pages per second */
211
212 /* Stats about LRU eviction */
213 ulint unzip_lru_len; /*!< length of buf_pool->unzip_LRU
214 list */
215 /* Counters for LRU policy */
216 ulint io_sum; /*!< buf_LRU_stat_sum.io */
217 ulint io_cur; /*!< buf_LRU_stat_cur.io, num of IO
218 for current interval */
219 ulint unzip_sum; /*!< buf_LRU_stat_sum.unzip */
220 ulint unzip_cur; /*!< buf_LRU_stat_cur.unzip, num
221 pages decompressed in current
222 interval */
223};
224
225/** The occupied bytes of lists in all buffer pools */
227 ulint LRU_bytes; /*!< LRU size in bytes */
228 ulint unzip_LRU_bytes; /*!< unzip_LRU size in bytes */
229 ulint flush_list_bytes; /*!< flush_list size in bytes */
230};
231
232#ifndef UNIV_HOTBACKUP
233/** Creates the buffer pool.
234@param[in] total_size Size of the total pool in bytes.
235@param[in] n_instances Number of buffer pool instances to create.
236@return DB_SUCCESS if success, DB_ERROR if not enough memory or error */
237dberr_t buf_pool_init(ulint total_size, ulint n_instances);
238
239/** Frees the buffer pool at shutdown. This must not be invoked before
240 freeing all mutexes. */
241void buf_pool_free_all();
242
243/** Determines if a block is intended to be withdrawn. The caller must ensure
244that there was a sufficient memory barrier to read curr_size and old_size.
245@param[in] buf_pool buffer pool instance
246@param[in] block pointer to control block
247@retval true if will be withdrawn */
248bool buf_block_will_withdrawn(buf_pool_t *buf_pool, const buf_block_t *block);
249
250/** Determines if a frame is intended to be withdrawn. The caller must ensure
251that there was a sufficient memory barrier to read curr_size and old_size.
252@param[in] buf_pool buffer pool instance
253@param[in] ptr pointer to a frame
254@retval true if will be withdrawn */
255bool buf_frame_will_withdrawn(buf_pool_t *buf_pool, const byte *ptr);
256
257/** This is the thread for resizing buffer pool. It waits for an event and
258when waked up either performs a resizing and sleeps again. */
259void buf_resize_thread();
260
261/** Checks if innobase_should_madvise_buf_pool() value has changed since we've
262last check and if so, then updates buf_pool_should_madvise and calls madvise
263for all chunks in all srv_buf_pool_instances.
264@see buf_pool_should_madvise comment for a longer explanation. */
void buf_pool_update_madvise();
267/** Clears the adaptive hash index on all pages in the buffer pool. */
/** Clears the adaptive hash index on all pages in the buffer pool. */
void buf_pool_clear_hash_index(void);
270/** Gets the current size of buffer buf_pool in bytes.
271 @return size in bytes */
272static inline ulint buf_pool_get_curr_size(void);
273/** Gets the current size of buffer buf_pool in frames.
274 @return size in pages */
275static inline ulint buf_pool_get_n_pages(void);
276#endif /* !UNIV_HOTBACKUP */
277
278/** Gets the smallest oldest_modification lsn among all of the earliest
279added pages in flush lists. In other words - takes the last dirty page
280from each flush list, and calculates minimum oldest_modification among
281all of them. Does not acquire global lock for the whole process, so the
282result might come from inconsistent view on flush lists.
283
284@note Note that because of the relaxed order in each flush list, this
285functions no longer returns the smallest oldest_modification among all
286of the dirty pages. If you wanted to have a safe lsn, which is smaller
287than every oldest_modification, you would need to use another function:
288 buf_pool_get_oldest_modification_lwm().
289
290Returns zero if there were no dirty pages (flush lists were empty).
291
292@return minimum oldest_modification of last pages from flush lists,
293 zero if flush lists were empty */
lsn_t buf_pool_get_oldest_modification_approx(void);
296/** Gets a safe low watermark for oldest_modification. It's guaranteed
297that there were no dirty pages with smaller oldest_modification in the
298whole flush lists.
299
300Returns zero if flush lists were empty, be careful in such case, because
301taking the newest lsn is probably not a good idea. If you wanted to rely
302on some lsn in such case, you would need to follow pattern:
303
304 dpa_lsn = log_buffer_dirty_pages_added_up_to_lsn(*log_sys);
305
306 lwm_lsn = buf_pool_get_oldest_modification_lwm();
307
308 if (lwm_lsn == 0) lwm_lsn = dpa_lsn;
309
310The order is important to avoid race conditions.
311
312@remarks
313It's guaranteed that the returned value will not be smaller than the
314last checkpoint lsn. It's not guaranteed that the returned value is
315the maximum possible. It's just the best effort for the low cost.
316It basically takes result of buf_pool_get_oldest_modification_approx()
317and subtracts maximum possible lag introduced by relaxed order in
318flush lists (srv_log_recent_closed_size).
319
320@return safe low watermark for oldest_modification of dirty pages,
321 or zero if flush lists were empty; if non-zero, it is then
322 guaranteed not to be at block boundary (and it points to lsn
323 inside data fragment of block) */
lsn_t buf_pool_get_oldest_modification_lwm(void);
326#ifndef UNIV_HOTBACKUP
327
328/** Allocates a buf_page_t descriptor. This function must succeed. In case
329 of failure we assert in this function. */
buf_page_t *buf_page_alloc_descriptor(void) MY_ATTRIBUTE((malloc));
332
333/** Free a buf_page_t descriptor.
334@param[in] bpage bpage descriptor to free */
void buf_page_free_descriptor(buf_page_t *bpage);
337/** Allocates a buffer block.
338 @return own: the allocated block, in state BUF_BLOCK_MEMORY */
buf_block_t *buf_block_alloc(
    buf_pool_t *buf_pool); /*!< in: buffer pool instance,
341 or NULL for round-robin selection
342 of the buffer pool */
343/** Frees a buffer block which does not contain a file page. */
344static inline void buf_block_free(
345 buf_block_t *block); /*!< in, own: block to be freed */
346#endif /* !UNIV_HOTBACKUP */
347
348/** Copies contents of a buffer frame to a given buffer.
349@param[in] buf buffer to copy to
350@param[in] frame buffer frame
351@return buf */
352static inline byte *buf_frame_copy(byte *buf, const buf_frame_t *frame);
353
354#ifndef UNIV_HOTBACKUP
355/** This is the general function used to get optimistic access to a database
356page.
357@param[in] rw_latch RW_S_LATCH, RW_X_LATCH
358@param[in,out] block Guessed block
359@param[in] modify_clock Modify clock value
360@param[in] fetch_mode Fetch mode
361@param[in] file File name
362@param[in] line Line where called
363@param[in,out] mtr Mini-transaction
364@return true if success */
365bool buf_page_optimistic_get(ulint rw_latch, buf_block_t *block,
366 uint64_t modify_clock, Page_fetch fetch_mode,
367 const char *file, ulint line, mtr_t *mtr);
368
369/** This is used to get access to a known database page, when no waiting can be
370done.
371@param[in] rw_latch RW_S_LATCH or RW_X_LATCH.
372@param[in] block The known page.
373@param[in] hint Cache_hint::MAKE_YOUNG or Cache_hint::KEEP_OLD
374@param[in] file File name from where it was called.
375@param[in] line Line from where it was called.
376@param[in,out] mtr Mini-transaction covering the fetch
377@return true if success */
378bool buf_page_get_known_nowait(ulint rw_latch, buf_block_t *block,
379 Cache_hint hint, const char *file, ulint line,
380 mtr_t *mtr);
381
382/** Given a tablespace id and page number tries to get that page. If the
383page is not in the buffer pool it is not loaded and NULL is returned.
384Suitable for using when holding the lock_sys latches (as it avoids deadlock).
385@param[in] page_id page Id
386@param[in] location Location where called
387@param[in] mtr Mini-transaction
388@return pointer to a page or NULL */
389const buf_block_t *buf_page_try_get(const page_id_t &page_id,
390 ut::Location location, mtr_t *mtr);
391
392/** Get read access to a compressed page (usually of type
393FIL_PAGE_TYPE_ZBLOB or FIL_PAGE_TYPE_ZBLOB2).
394The page must be released with buf_page_release_zip().
395NOTE: the page is not protected by any latch. Mutual exclusion has to
396be implemented at a higher level. In other words, all possible
397accesses to a given page through this function must be protected by
398the same set of mutexes or latches.
399@param[in] page_id page id
400@param[in] page_size page size
401@return pointer to the block */
buf_page_t *buf_page_get_zip(const page_id_t &page_id,
                             const page_size_t &page_size);
404
405/** This is the general function used to get access to a database page.
406@param[in] page_id Page id
407@param[in] page_size Page size
408@param[in] rw_latch RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH
409@param[in] guess Guessed block or NULL
410@param[in] mode Fetch mode.
411@param[in] location Location from where this method was called.
412@param[in] mtr Mini-transaction
413@param[in] dirty_with_no_latch Mark page as dirty even if page is being
414 pinned without any latch
415@return pointer to the block or NULL */
buf_block_t *buf_page_get_gen(const page_id_t &page_id,
                              const page_size_t &page_size, ulint rw_latch,
                              buf_block_t *guess, Page_fetch mode,
                              ut::Location location, mtr_t *mtr,
                              bool dirty_with_no_latch = false);
421
422/** NOTE! The following macros should be used instead of buf_page_get_gen,
423 to improve debugging. Only values RW_S_LATCH and RW_X_LATCH are allowed
424 in LA! */
425inline buf_block_t *buf_page_get(const page_id_t &id, const page_size_t &size,
426 ulint latch, ut::Location location,
427 mtr_t *mtr) {
428 return buf_page_get_gen(id, size, latch, nullptr, Page_fetch::NORMAL,
429 location, mtr);
430}
431/** Use these macros to bufferfix a page with no latching. Remember not to
432 read the contents of the page unless you know it is safe. Do not modify
433 the contents of the page! We have separated this case, because it is
434 error-prone programming not to set a latch, and it should be used
435 with care. */
437 const page_size_t &size,
438 ut::Location location,
439 mtr_t *mtr) {
440 return buf_page_get_gen(id, size, RW_NO_LATCH, nullptr, Page_fetch::NO_LATCH,
441 location, mtr);
442}
443
444/** Initializes a page to the buffer buf_pool. The page is usually not read
445from a file even if it cannot be found in the buffer buf_pool. This is one
446of the functions which perform to a block a state transition NOT_USED =>
447FILE_PAGE (the other is buf_page_get_gen). The page is latched by passed mtr.
448@param[in] page_id Page id
449@param[in] page_size Page size
450@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH
451@param[in] mtr Mini-transaction
452@return pointer to the block, page bufferfixed */
454 const page_size_t &page_size,
455 rw_lock_type_t rw_latch, mtr_t *mtr);
456
457#else /* !UNIV_HOTBACKUP */
458
459/** Inits a page to the buffer buf_pool, for use in mysqlbackup --restore.
460@param[in] page_id page id
461@param[in] page_size page size
462@param[in,out] block block to init */
463void meb_page_init(const page_id_t &page_id, const page_size_t &page_size,
464 buf_block_t *block);
465#endif /* !UNIV_HOTBACKUP */
466
467#ifndef UNIV_HOTBACKUP
468/** Releases a compressed-only page acquired with buf_page_get_zip(). */
469static inline void buf_page_release_zip(
470 buf_page_t *bpage); /*!< in: buffer block */
471
472/** Releases a latch, if specified.
473@param[in] block buffer block
474@param[in] rw_latch RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH */
475static inline void buf_page_release_latch(buf_block_t *block, ulint rw_latch);
476
477/** Moves a page to the start of the buffer pool LRU list. This high-level
478function can be used to prevent an important page from slipping out of
479the buffer pool.
480@param[in,out] bpage buffer block of a file page */
482
483/** Moved a page to the end of the buffer pool LRU list so that it can be
484flushed out at the earliest.
485@param[in] bpage buffer block of a file page */
486void buf_page_make_old(buf_page_t *bpage);
487
488/** Returns true if the page can be found in the buffer pool hash table.
489NOTE that it is possible that the page is not yet read from disk,
490though.
491@param[in] page_id page id
492@return true if found in the page hash table */
493static inline bool buf_page_peek(const page_id_t &page_id);
494
495#ifdef UNIV_DEBUG
496
497/** Sets file_page_was_freed true if the page is found in the buffer pool.
498This function should be called when we free a file page and want the
499debug version to check that it is not accessed any more unless
500reallocated.
501@param[in] page_id page id
502@return control block if found in page hash table, otherwise NULL */
504
505/** Sets file_page_was_freed false if the page is found in the buffer pool.
506This function should be called when we free a file page and want the
507debug version to check that it is not accessed any more unless
508reallocated.
509@param[in] page_id page id
510@return control block if found in page hash table, otherwise NULL */
512
513#endif /* UNIV_DEBUG */
514/** Reads the freed_page_clock of a buffer block.
515 @return freed_page_clock */
516[[nodiscard]] static inline ulint buf_page_get_freed_page_clock(
517 const buf_page_t *bpage); /*!< in: block */
518/** Reads the freed_page_clock of a buffer block.
519 @return freed_page_clock */
520[[nodiscard]] static inline ulint buf_block_get_freed_page_clock(
521 const buf_block_t *block); /*!< in: block */
522
523/** Tells, for heuristics, if a block is still close enough to the MRU end of
524the LRU list meaning that it is not in danger of getting evicted and also
525implying that it has been accessed recently.
526The page must be either buffer-fixed, either its page hash must be locked.
527@param[in] bpage block
528@return true if block is close to MRU end of LRU */
529static inline bool buf_page_peek_if_young(const buf_page_t *bpage);
530
531/** Recommends a move of a block to the start of the LRU list if there is
532danger of dropping from the buffer pool.
533NOTE: does not reserve the LRU list mutex.
534@param[in] bpage block to make younger
535@return true if should be made younger */
536static inline bool buf_page_peek_if_too_old(const buf_page_t *bpage);
537
538/** Gets the youngest modification log sequence number for a frame.
539 Returns zero if not file page or no modification occurred yet.
540 @return newest modification to page */
542 const buf_page_t *bpage); /*!< in: block containing the
543 page frame */
544
545/** Increment the modify clock.
546The caller must
547(1) own the buf_pool->mutex and block bufferfix count has to be zero,
548(2) own X or SX latch on the block->lock, or
549(3) operate on a thread-private temporary table
550@param[in,out] block buffer block */
551static inline void buf_block_modify_clock_inc(buf_block_t *block);
552
553/** Increments the bufferfix count.
554@param[in] location location
555@param[in,out] block block to bufferfix */
557 buf_block_t *block);
558
559/** Increments the bufferfix count.
560@param[in,out] bpage block to bufferfix
561@return the count */
562static inline ulint buf_block_fix(buf_page_t *bpage);
563
564/** Increments the bufferfix count.
565@param[in,out] block block to bufferfix
566@return the count */
567static inline ulint buf_block_fix(buf_block_t *block);
568
569/** Decrements the bufferfix count.
570@param[in,out] bpage block to bufferunfix
571@return the remaining buffer-fix count */
572static inline ulint buf_block_unfix(buf_page_t *bpage);
573
574/** Decrements the bufferfix count.
575@param[in,out] block block to bufferunfix
576@return the remaining buffer-fix count */
577static inline ulint buf_block_unfix(buf_block_t *block);
578
579/** Unfixes the page, unlatches the page,
580removes it from page_hash and removes it from LRU.
581@param[in,out] bpage pointer to the block */
583
584/** Increments the bufferfix count.
585@param[in,out] b block to bufferfix
586@param[in] l location where requested */
588 ut::Location l [[maybe_unused]]) {
590}
591#else /* !UNIV_HOTBACKUP */
592static inline void buf_block_modify_clock_inc(buf_block_t *block) {}
593#endif /* !UNIV_HOTBACKUP */
594
595#ifndef UNIV_HOTBACKUP
596
597/** Gets the space id, page offset, and byte offset within page of a pointer
598pointing to a buffer frame containing a file page.
599@param[in] ptr pointer to a buffer frame
600@param[out] space space id
601@param[out] addr page offset and byte offset */
602static inline void buf_ptr_get_fsp_addr(const void *ptr, space_id_t *space,
603 fil_addr_t *addr);
604
605#ifdef UNIV_DEBUG
606/** Finds a block in the buffer pool that points to a
607given compressed page. Used only to confirm that buffer pool does not contain a
608given pointer, thus protected by zip_free_mutex.
609@param[in] buf_pool buffer pool instance
610@param[in] data pointer to compressed page
611@return buffer block pointing to the compressed page, or NULL */
612buf_block_t *buf_pool_contains_zip(buf_pool_t *buf_pool, const void *data);
613#endif /* UNIV_DEBUG */
614
615/***********************************************************************
616FIXME_FTS: Gets the frame the pointer is pointing to. */
618 /* out: pointer to frame */
619 byte *ptr); /* in: pointer to a frame */
620
621#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
622/** Validates the buffer pool data structure.
623 @return true */
624bool buf_validate(void);
625#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
626#if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
627/** Prints info of the buffer pool data structure. */
628void buf_print(void);
629#endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */
630#endif /* !UNIV_HOTBACKUP */
632 /** Do not crash at the end of buf_page_print(). */
634 /** Do not print the full page dump. */
637
638/** Prints a page to stderr.
639@param[in] read_buf a database page
640@param[in] page_size page size
641@param[in] flags 0 or BUF_PAGE_PRINT_NO_CRASH or
642BUF_PAGE_PRINT_NO_FULL */
643void buf_page_print(const byte *read_buf, const page_size_t &page_size,
644 ulint flags);
645
646/** Decompress a block.
647 @return true if successful */
648bool buf_zip_decompress(buf_block_t *block, /*!< in/out: block */
649 bool check); /*!< in: true=verify the page checksum */
650#ifndef UNIV_HOTBACKUP
651#ifdef UNIV_DEBUG
652/** Returns the number of latched pages in the buffer pool.
653 @return number of latched pages */
655#endif /* UNIV_DEBUG */
656/** Returns the number of pending buf pool read ios.
657 @return number of pending read I/O operations */
659/** Prints info of the buffer i/o. */
660void buf_print_io(FILE *file); /*!< in: file where to print */
661/** Collect buffer pool stats information for a buffer pool. Also
662 record aggregated stats if there are more than one buffer pool
663 in the server */
665 buf_pool_t *buf_pool, /*!< in: buffer pool */
666 ulint pool_id, /*!< in: buffer pool ID */
667 buf_pool_info_t *all_pool_info); /*!< in/out: buffer pool info
668 to fill */
669/** Return the ratio in percents of modified pages in the buffer pool /
670database pages in the buffer pool.
671@return modified page percentage ratio */
672double buf_get_modified_ratio_pct(void);
673/** Refresh the statistics used to print per-second averages. */
675
676/** Assert that all file pages in the buffer are in a replaceable state. */
677void buf_must_be_all_freed(void);
678
679/** Computes number of pending I/O read operations for the buffer pool.
680@return number of pending i/o reads */
682
683/** Computes number of pending I/O write operations for the buffer pool.
684@return number of pending i/o writes */
686
687/** Waits until there are no pending I/O read operations for the buffer pool.
688Keep waiting in loop with sleeps, emitting information every minute.
689This is used to avoid risk of some pending async read (e.g. enqueued by
690the linear read-ahead), which would involve ibuf merge and create new
691redo records. */
693
694/** Invalidates the file pages in the buffer pool when an archive recovery is
695 completed. All the file pages buffered must be in a replaceable state when
696 this function is called: not latched and not modified. */
697void buf_pool_invalidate(void);
698
699/*========================================================================
700--------------------------- LOWER LEVEL ROUTINES -------------------------
701=========================================================================*/
702
703#ifdef UNIV_DEBUG
704/** Adds latch level info for the rw-lock protecting the buffer frame. This
705should be called in the debug version after a successful latching of a page if
706we know the latching order level of the acquired latch.
707@param[in] block buffer page where we have acquired latch
708@param[in] level latching order level */
709static inline void buf_block_dbg_add_level(buf_block_t *block,
710 latch_level_t level);
711#else /* UNIV_DEBUG */
712#define buf_block_dbg_add_level(block, level) /* nothing */
713#endif /* UNIV_DEBUG */
714
715#endif /* !UNIV_HOTBACKUP */
716
717/** Gets the state of a block.
718 @return state */
720 const buf_page_t *bpage); /*!< in: pointer to the control block */
721/** Gets the state of a block.
722 @return state */
723[[nodiscard]] static inline enum buf_page_state buf_block_get_state(
724 const buf_block_t *block); /*!< in: pointer to the control block */
725
726/** Sets the state of a block.
727@param[in,out] bpage pointer to control block
728@param[in] state state */
729static inline void buf_page_set_state(buf_page_t *bpage,
730 enum buf_page_state state);
731
732/** Sets the state of a block.
733@param[in,out] block pointer to control block
734@param[in] state state */
735static inline void buf_block_set_state(buf_block_t *block,
736 enum buf_page_state state);
737
738/** Determines if a block is mapped to a tablespace.
739 @return true if mapped */
740[[nodiscard]] static inline bool buf_page_in_file(
741 const buf_page_t *bpage); /*!< in: pointer to control block */
742#ifndef UNIV_HOTBACKUP
743/** Determines if a block should be on unzip_LRU list.
744 @return true if block belongs to unzip_LRU */
745[[nodiscard]] static inline bool buf_page_belongs_to_unzip_LRU(
746 const buf_page_t *bpage); /*!< in: pointer to control block */
747
748/** Gets the mutex of a block.
749 @return pointer to mutex protecting bpage */
750[[nodiscard]] static inline BPageMutex *buf_page_get_mutex(
751 const buf_page_t *bpage); /*!< in: pointer to control block */
752
753/** Get the flush type of a page.
754 @return flush type */
755[[nodiscard]] static inline buf_flush_t buf_page_get_flush_type(
756 const buf_page_t *bpage); /*!< in: buffer page */
757
758/** Set the flush type of a page.
759@param[in] bpage buffer page
760@param[in] flush_type flush type */
761static inline void buf_page_set_flush_type(buf_page_t *bpage,
763
764/** Map a block to a file page.
765@param[in,out] block pointer to control block
766@param[in] page_id page id */
767static inline void buf_block_set_file_page(buf_block_t *block,
768 const page_id_t &page_id);
769
770/** Gets the io_fix state of a block.
771 @return io_fix state */
772[[nodiscard]] static inline enum buf_io_fix buf_page_get_io_fix(
773 const buf_page_t *bpage); /*!< in: pointer to the control block */
774/** Gets the io_fix state of a block.
775 @return io_fix state */
776[[nodiscard]] static inline enum buf_io_fix buf_block_get_io_fix(
777 const buf_block_t *block); /*!< in: pointer to the control block */
778
779/** Sets the io_fix state of a block.
780@param[in,out] bpage control block
781@param[in] io_fix io_fix state */
782static inline void buf_page_set_io_fix(buf_page_t *bpage,
783 enum buf_io_fix io_fix);
784
785/** Sets the io_fix state of a block.
786@param[in,out] block control block
787@param[in] io_fix io_fix state */
788static inline void buf_block_set_io_fix(buf_block_t *block,
789 enum buf_io_fix io_fix);
790
791/** Makes a block sticky. A sticky block implies that even after we release
792the buf_pool->LRU_list_mutex and the block->mutex:
793* it cannot be removed from the flush_list
794* the block descriptor cannot be relocated
795* it cannot be removed from the LRU list
796Note that:
797* the block can still change its position in the LRU list
798* the next and previous pointers can change.
799@param[in,out] bpage control block */
800static inline void buf_page_set_sticky(buf_page_t *bpage);
801
802/** Removes stickiness of a block. */
803static inline void buf_page_unset_sticky(
804 buf_page_t *bpage); /*!< in/out: control block */
805/** Determine if a buffer block can be relocated in memory. The block
806 can be dirty, but it must not be I/O-fixed or bufferfixed. */
807[[nodiscard]] static inline bool buf_page_can_relocate(
808 const buf_page_t *bpage); /*!< control block being relocated */
809
810/** Determine if a block has been flagged old.
811@param[in] bpage control block
812@return true if old */
813[[nodiscard]] static inline bool buf_page_is_old(const buf_page_t *bpage);
814
815/** Flag a block old.
816@param[in,out] bpage control block
817@param[in] old old */
818static inline void buf_page_set_old(buf_page_t *bpage, bool old);
819
820/** Determine the time of first access of a block in the buffer pool.
821 @return Time of first access, zero if not accessed
822 */
823[[nodiscard]] static inline std::chrono::steady_clock::time_point
824buf_page_is_accessed(const buf_page_t *bpage); /*!< in: control block */
825/** Flag a block accessed. */
826static inline void buf_page_set_accessed(
827 buf_page_t *bpage); /*!< in/out: control block */
828
829/** Gets the buf_block_t handle of a buffered file block if an uncompressed
830page frame exists, or NULL. The caller must hold either the appropriate hash
831lock in any mode, or the LRU list mutex. Note:
832even though bpage is not declared a const we don't update its value. It is safe
833to make this pure.
834@param[in] bpage control block, or NULL
835@return control block, or NULL */
836[[nodiscard]] static inline buf_block_t *buf_page_get_block(buf_page_t *bpage);
837#ifdef UNIV_DEBUG
838/** Gets a pointer to the memory frame of a block.
839 @return pointer to the frame */
840[[nodiscard]] static inline buf_frame_t *buf_block_get_frame(
841 const buf_block_t *block); /*!< in: pointer to the control block */
842#else /* UNIV_DEBUG */
843#define buf_block_get_frame(block) (block)->frame
844#endif /* UNIV_DEBUG */
845#else /* !UNIV_HOTBACKUP */
846#define buf_block_get_frame(block) (block)->frame
847#endif /* !UNIV_HOTBACKUP */
848
849/** Get a buffer block from an adaptive hash index pointer.
850This function does not return if the block is not identified.
851@param[in] ptr pointer to within a page frame
852@return pointer to block, never NULL */
853buf_block_t *buf_block_from_ahi(const byte *ptr);
854
855/** Find out if a block pointer points into one of currently used chunks of
856the buffer pool. This is useful if you stored the pointer some time ago, and
857want to dereference it now, and are afraid that buffer pool resize could free
858the memory pointed by it. Thus calling this function requires holding at least
859one of the latches which prevent freeing memory from buffer pool for the
860duration of the call and until you pin the block in some other way, as otherwise
861the result of this function might be obsolete by the time you dereference the
862block (an s-latch on buf_page_hash_lock_get for any hash cell is enough).
863@param buf_pool The buffer pool instance to search in.
864@param ptr A pointer which you want to check. This function will not
865 dereference it.
866@return true iff `block` points inside one of the chunks of the `buf_pool`
867*/
868bool buf_is_block_in_instance(const buf_pool_t *buf_pool,
869 const buf_block_t *ptr);
870
871#ifndef UNIV_HOTBACKUP
872
873/** Inits a page for read to the buffer buf_pool. If the page is
874(1) already in buf_pool, or
875(2) if we specify to read only ibuf pages and the page is not an ibuf page, or
876(3) if the space is deleted or being deleted,
877then this function does nothing.
878Sets the io_fix flag to BUF_IO_READ and sets a non-recursive exclusive lock
879on the buffer frame. The io-handler must take care that the flag is cleared
880and the lock released later.
881@param[out] err DB_SUCCESS or DB_TABLESPACE_DELETED
882@param[in] mode BUF_READ_IBUF_PAGES_ONLY, ...
883@param[in] page_id page id
884@param[in] page_size page size
885@param[in] unzip true=request uncompressed page
886@return pointer to the block or NULL */
888 const page_id_t &page_id,
889 const page_size_t &page_size, bool unzip);
890
891/** Completes an asynchronous read or write request of a file page to or from
892the buffer pool.
893@param[in] bpage pointer to the block in question
894@param[in] evict whether or not to evict the page from LRU list
895@return true if successful */
896bool buf_page_io_complete(buf_page_t *bpage, bool evict);
897
898/** Free a stale page. Caller must hold the LRU mutex. Upon successful page
899free the LRU mutex will be released.
900@param[in,out] buf_pool Buffer pool the page belongs to.
901@param[in,out] bpage Page to free.
902@return true if page was freed. */
903bool buf_page_free_stale(buf_pool_t *buf_pool, buf_page_t *bpage) noexcept;
904
905/** Free a stale page. Caller must be holding the hash_lock in S mode if
906hash_lock parameter is not nullptr. The hash lock will be released upon return
907always. Caller must hold the LRU mutex if and only if the hash_lock parameter
908is nullptr. Upon unsuccessful page free the LRU mutex will not be released if
909hash_lock is nullptr.
910@param[in,out] buf_pool Buffer pool the page belongs to.
911@param[in,out] bpage Page to free.
912@param[in,out] hash_lock Hash lock covering the fetch from the hash table if
913latched in S mode. nullptr otherwise.
914@return true if page was freed. */
915bool buf_page_free_stale(buf_pool_t *buf_pool, buf_page_t *bpage,
916 rw_lock_t *hash_lock) noexcept;
917
918/** Free a stale page that is being written. The caller must be within the
919page's write code path.
920@param[in,out] bpage Page to free.
921@param[in] owns_sx_lock SX lock on block->lock is set. */
923 bool owns_sx_lock = false) noexcept;
924
925/** Calculates the index of a buffer pool to the buf_pool[] array.
926 @return the position of the buffer pool in buf_pool[] */
927[[nodiscard]] static inline ulint buf_pool_index(
928 const buf_pool_t *buf_pool); /*!< in: buffer pool */
929/** Returns the buffer pool instance given a page instance
930 @return buf_pool */
932 const buf_page_t *bpage); /*!< in: buffer pool page */
933/** Returns the buffer pool instance given a block instance
934 @return buf_pool */
936 const buf_block_t *block); /*!< in: block */
937
938/** Returns the buffer pool instance given a page id.
939@param[in] page_id page id
940@return buffer pool */
941static inline buf_pool_t *buf_pool_get(const page_id_t &page_id);
942
943/** Returns the buffer pool instance given its array index
944 @return buffer pool */
946 ulint index); /*!< in: array index to get
947 buffer pool instance from */
948
949/** Returns the control block of a file page, NULL if not found.
950@param[in] buf_pool buffer pool instance
951@param[in] page_id page id
952@return block, NULL if not found */
954 const page_id_t &page_id);
955
956/** Returns the control block of a file page, NULL if not found.
957If the block is found and lock is not NULL then the appropriate
958page_hash lock is acquired in the specified lock mode. Otherwise,
959mode value is ignored. It is up to the caller to release the
960lock. If the block is found and the lock is NULL then the page_hash
961lock is released by this function.
962@param[in] buf_pool buffer pool instance
963@param[in] page_id page id
964@param[in,out] lock lock of the page hash acquired if bpage is
965found, NULL otherwise. If NULL is passed then the hash_lock is released by
966this function.
967@param[in] lock_mode RW_LOCK_X or RW_LOCK_S. Ignored if
968lock == NULL
969@param[in] watch if true, return watch sentinel also.
970@return pointer to the bpage or NULL; if NULL, lock is also NULL or
971a watch sentinel. */
973 const page_id_t &page_id,
974 rw_lock_t **lock,
976 bool watch = false);
977
978/** Returns the control block of a file page, NULL if not found.
979If the block is found and lock is not NULL then the appropriate
980page_hash lock is acquired in the specified lock mode. Otherwise,
981mode value is ignored. It is up to the caller to release the
982lock. If the block is found and the lock is NULL then the page_hash
983lock is released by this function.
984@param[in] buf_pool buffer pool instance
985@param[in] page_id page id
986@param[in,out] lock lock of the page hash acquired if bpage is
987found, NULL otherwise. If NULL is passed then the hash_lock is released by
988this function.
989@param[in] lock_mode RW_LOCK_X or RW_LOCK_S. Ignored if
990lock == NULL
991@return pointer to the block or NULL; if NULL, lock is also NULL. */
993 const page_id_t &page_id,
994 rw_lock_t **lock,
996
997/* There are four different ways we can try to get a bpage or block
998from the page hash:
9991) Caller already holds the appropriate page hash lock: in the case call
1000buf_page_hash_get_low() function.
10012) Caller wants to hold page hash lock in x-mode
10023) Caller wants to hold page hash lock in s-mode
10034) Caller doesn't want to hold page hash lock */
1005 const page_id_t &page_id,
1006 rw_lock_t **l) {
1007 return buf_page_hash_get_locked(b, page_id, l, RW_LOCK_S);
1008}
1010 const page_id_t &page_id,
1011 rw_lock_t **l) {
1012 return buf_page_hash_get_locked(b, page_id, l, RW_LOCK_X);
1013}
1015 return buf_page_hash_get_locked(b, page_id, nullptr, 0);
1016}
1018 const page_id_t &page_id) {
1019 return buf_page_hash_get_locked(b, page_id, nullptr, 0, true);
1020}
1021
1023 const page_id_t &page_id,
1024 rw_lock_t **l) {
1025 return buf_block_hash_get_locked(b, page_id, l, RW_LOCK_S);
1026}
1028 const page_id_t &page_id,
1029 rw_lock_t **l) {
1030 return buf_block_hash_get_locked(b, page_id, l, RW_LOCK_X);
1031}
1033 const page_id_t &page_id) {
1034 return buf_block_hash_get_locked(b, page_id, nullptr, 0);
1035}
1036
1037/** Gets the current length of the free list of buffer blocks.
1038 @return length of the free list */
1040
1041/** Determine if a block is a sentinel for a buffer pool watch.
1042@param[in] buf_pool buffer pool instance
1043@param[in] bpage block
1044@return true if a sentinel for a buffer pool watch, false if not */
1045[[nodiscard]] bool buf_pool_watch_is_sentinel(const buf_pool_t *buf_pool,
1046 const buf_page_t *bpage);
1047
1048/** Stop watching if the page has been read in.
1049buf_pool_watch_set(same_page_id) must have returned NULL before.
1050@param[in] page_id page id */
1051void buf_pool_watch_unset(const page_id_t &page_id);
1052
1053/** Check if the page has been read in.
1054This may only be called after buf_pool_watch_set(same_page_id)
1055has returned NULL and before invoking buf_pool_watch_unset(same_page_id).
1056@param[in] page_id page id
1057@return false if the given page was not read in, true if it was */
1058[[nodiscard]] bool buf_pool_watch_occurred(const page_id_t &page_id);
1059
1060/** Get total buffer pool statistics.
1061@param[out] LRU_len Length of all lru lists
1062@param[out] free_len Length of all free lists
1063@param[out] flush_list_len Length of all flush lists */
1064void buf_get_total_list_len(ulint *LRU_len, ulint *free_len,
1065 ulint *flush_list_len);
1066
1067/** Get total list size in bytes from all buffer pools. */
1069 buf_pools_list_size_t *buf_pools_list_size); /*!< out: list sizes
1070 in all buffer pools */
1071/** Get total buffer pool statistics. */
1073 buf_pool_stat_t *tot_stat); /*!< out: buffer pool stats */
1074
1075/** Get the nth chunk's buffer block in the specified buffer pool.
1076@param[in] buf_pool buffer pool instance
1077@param[in] n nth chunk in the buffer pool
1078@param[in] chunk_size chunk_size
1079@return the nth chunk's buffer block. */
1080static inline buf_block_t *buf_get_nth_chunk_block(const buf_pool_t *buf_pool,
1081 ulint n, ulint *chunk_size);
1082
1083/** Calculate aligned buffer pool size based on srv_buf_pool_chunk_unit,
1084if needed.
1085@param[in] size size in bytes
1086@return aligned size */
1087static inline ulint buf_pool_size_align(ulint size);
1088
1089/** Adjust the proposed chunk unit size so that it satisfies all invariants
1090@param[in] size proposed size of buffer pool chunk unit in bytes
1091@return adjusted size which meets invariants */
1093
1094/** Calculate the checksum of a page from compressed table and update the
1095page.
1096@param[in,out] page page to update
1097@param[in] size compressed page size
1098@param[in] lsn LSN to stamp on the page
1099@param[in] skip_lsn_check true to skip check for lsn (in DEBUG) */
1101 bool skip_lsn_check);
1102
1103/** Return how many more pages must be added to the withdraw list to reach the
1104withdraw target of the currently ongoing buffer pool resize.
1105@param[in] buf_pool buffer pool instance
1106@return page count to be withdrawn or zero if the target is already achieved or
1107if the buffer pool is not currently being resized. */
1108static inline ulint buf_get_withdraw_depth(buf_pool_t *buf_pool);
1109
1110#endif /* !UNIV_HOTBACKUP */
1111
1112/** The common buffer control block structure
1113for compressed and uncompressed frames */
1114
1115/** Number of bits used for buffer page states. */
1116constexpr uint32_t BUF_PAGE_STATE_BITS = 3;
1117
1118template <typename T>
1119class copyable_atomic_t : public std::atomic<T> {
1120 public:
1122 : std::atomic<T>(other.load(std::memory_order_relaxed)) {}
1123};
1124
1127 public:
1128 /** Copy constructor.
1129 @param[in] other Instance to copy from. */
1131 : id(other.id),
1132 size(other.size),
1134 io_fix(other.io_fix),
1135 state(other.state),
1136 flush_type(other.flush_type),
1138#ifndef UNIV_HOTBACKUP
1139 hash(other.hash),
1140#endif /* !UNIV_HOTBACKUP */
1141 list(other.list),
1144 LRU(other.LRU),
1145 zip(other.zip)
1146#ifndef UNIV_HOTBACKUP
1147 ,
1149 m_space(other.m_space),
1151 m_version(other.m_version),
1152 access_time(other.access_time),
1153 m_dblwr_id(other.m_dblwr_id),
1154 old(other.old)
1155#ifdef UNIV_DEBUG
1156 ,
1160 in_LRU_list(other.in_LRU_list),
1163#endif /* UNIV_DEBUG */
1164#endif /* !UNIV_HOTBACKUP */
1165 {
1166#ifndef UNIV_HOTBACKUP
1167 m_space->inc_ref();
1168#endif /* !UNIV_HOTBACKUP */
1169 }
1170
1171#ifndef UNIV_HOTBACKUP
1172 /** Set the doublewrite buffer ID.
1173 @param[in] batch_id Double write batch ID that flushed the page. */
1174 void set_dblwr_batch_id(uint16_t batch_id) { m_dblwr_id = batch_id; }
1175
1176 /** @return the double write batch id, or uint16_t max if undefined. */
1177 [[nodiscard]] uint16_t get_dblwr_batch_id() const { return (m_dblwr_id); }
1178
1179 /** Retrieve the tablespace id.
1180 @return tablespace id */
1181 [[nodiscard]] space_id_t space() const noexcept { return id.space(); }
1182
1183 /** Retrieve the page number.
1184 @return page number */
1185 [[nodiscard]] page_no_t page_no() const noexcept { return id.page_no(); }
1186
1187 /** Checks if this space reference saved during last page ID initialization
1188 was deleted or truncated since.
1189 @return true when the space referenced here was deleted or truncated since
1190 the last page ID initialization, and this page should be discarded. The
1191 result is up to date until the fil_shard mutex is released. */
1192 inline bool is_stale() const {
1193 ut_a(m_space != nullptr);
1194 ut_a(id.space() == m_space->id);
1195 ut_a(m_version <= m_space->get_current_version());
1197 ut_a(!m_space->is_deleted());
1198 return false;
1199 } else {
1200 return true;
1201 }
1202 }
1203
1204 /** Checks if this space reference saved during last page ID initialization
1205 was deleted or truncated since.
1206 @return true when the space referenced here was deleted or truncated since
1207 the last page ID initialization, and this page should be discarded. When
1208 false is returned, the page is guaranteed not to have been stale at some
1208 moment during the call. */
1209 inline bool was_stale() const {
1210 ut_a(m_space != nullptr);
1211 ut_a(id.space() == m_space->id);
1212 /* If the version is OK, then the space must not be deleted.
1213 However, version is modified before the deletion flag is set, so reading
1214 these values need to be executed in reversed order. The atomic reads
1215 cannot be relaxed for it to work. */
1216 bool was_not_deleted = m_space->was_not_deleted();
1218 ut_a(was_not_deleted);
1219 return false;
1220 } else {
1221 return true;
1222 }
1223 }
1224
1225 /** Retrieve the tablespace object if one was available during page ID
1226 initialization. The returned object is safe to use as long as this buf_page_t
1227 object is not changed. Caller should have a IO fix, buffer fix, mutex or any
1228 other mean to assure the page will not be freed. After that is released the
1229 space object may be freed.
1230 @return tablespace object */
1231 inline fil_space_t *get_space() const { return m_space; }
1232
1233 /** Sets stored page ID to the new value. Handles space object reference
1234 count.
1235 @param[in] new_page_id new page ID to be set. */
1236 inline void reset_page_id(page_id_t new_page_id) {
1237 if (m_space != nullptr) {
1238 /* If we reach this line through a call chain:
1239 srv_shutdown -> buf_pool_free_all -> buf_pool_free_instance ->
1240 buf_page_free_descriptor, then we are already past the fil system
1241 shutdown, and all fil_space_t objects were already freed. */
1243 m_space->dec_ref();
1244 }
1245 }
1246 id = new_page_id;
1248 }
1249
1250 /** Sets stored value to invalid/empty value. Handles space object reference
1251 count. */
1252 inline void reset_page_id() {
1254 }
1255
1256 private:
1257 /** Updates new space reference and acquires "reference count latch" and the
1258 current version of the space object. */
1259 inline void space_id_changed() {
1260 m_space = nullptr;
1261 m_version = 0;
1262 if (id.space() != UINT32_UNDEFINED) {
1263 m_space = fil_space_get(id.space());
1264 if (m_space) {
1265 m_space->inc_ref();
1266 /* We don't have a way to check the MDL locks, which are guarding the
1267 version number, so we don't use get_current_version(). */
1269 }
1270 }
1271 }
1272
1273 public:
1274 /** @return the flush observer instance. */
1276
1277 /** Set the flush observer for the page.
1278 @param[in] flush_observer The flush observer to set. */
1279 void set_flush_observer(Flush_observer *flush_observer) noexcept {
1280 /* Don't allow to set flush observer from non-null to null, or from one
1281 observer to another. */
1282 ut_a(m_flush_observer == nullptr || m_flush_observer == flush_observer);
1283 m_flush_observer = flush_observer;
1284 }
1285
1286 /** Remove the flush observer. */
1287 void reset_flush_observer() noexcept { m_flush_observer = nullptr; }
1288#endif /* !UNIV_HOTBACKUP */
1289
1290 /** @return the LSN of the latest modification. */
1291 lsn_t get_newest_lsn() const noexcept { return newest_modification; }
1292
1293 /** @return the LSN of the first modification since the last time
1294 it was clean. */
1295 lsn_t get_oldest_lsn() const noexcept { return oldest_modification; }
1296
1297 /** @return true if the page is dirty. */
1298 bool is_dirty() const noexcept { return get_oldest_lsn() > 0; }
1299
1300 /** Set the latest modification LSN.
1301 @param[in] lsn Latest modification lSN. */
1303
1304 /** Set the LSN when the page is modified for the first time.
1305 @param[in] lsn First modification LSN. */
1306 void set_oldest_lsn(lsn_t lsn) noexcept;
1307
1308 /** Set page to clean state. */
1309 void set_clean() noexcept { set_oldest_lsn(0); }
1310
1311 /** @name General fields
1312 None of these bit-fields must be modified without holding
1313 buf_page_get_mutex() [buf_block_t::mutex or
1314 buf_pool->zip_mutex], since they can be stored in the same
1315 machine word. */
1316 /** @{ */
1317
1318 /** Page id. */
1320
1321 /** Page size. */
1323
1324 /** Count of how many times this block is currently bufferfixed. */
1326
1327 private:
1328 /** Type of pending I/O operation.
1329 Modified under protection of buf_page_get_mutex(this).
1330 Read under protection of rules described in @see Buf_io_fix_latching_rules */
1332
1333#ifdef UNIV_DEBUG
1334 public:
1335 /** Checks if io_fix has any of the known enum values.
1336 @param[in] io_fix the value to test
1337 @return true iff io_fix has any of the known enum values
1338 */
1340 switch (io_fix) {
1341 case BUF_IO_NONE:
1342 case BUF_IO_READ:
1343 case BUF_IO_WRITE:
1344 case BUF_IO_PIN:
1345 return true;
1346 }
1347 return false;
1348 }
1349
1350 private:
1351 /** Checks if io_fix has any of the known enum values.
1352 @return true iff io_fix has any of the known enum values
1353 */
1356 }
1357 /* Helper debug-only functions related latching rules are moved to a separate
1358 class so that this header doesn't have to pull in Stateful_latching_rules.*/
1361
1362 /* Helper debug-only class used to track which thread is currently responsible
1363 for performing I/O operation on this page. There's at most one such thread and
1364 the responsibility might be passed from one to another during async I/O. This
1365 is used to prove correctness of io_fix state transitions and checking it
1366 without a latch in the io_completion threads. */
1368 /** The thread responsible for I/O on this page, or an impossible value if
1369 no thread is currently responsible for I/O*/
1370 std::thread::id responsible_thread{std::thread().get_id()};
1371
1372 public:
1373 /** Checks if there is any thread responsible for I/O on this page now.
1374 @return true iff there is a thread responsible for I/O on this page.*/
1376 return responsible_thread != std::thread().get_id();
1377 }
1378
1379 /** Checks if the current thread is responsible for I/O on this page now.
1380 @return true iff the current thread is responsible for I/O on this page.*/
1383 }
1384
1385 /** Called by the thread responsible for I/O on this page to release its
1386 responsibility. */
1387 void release() {
1389 responsible_thread = std::thread().get_id();
1390 }
1391
1392 /** Called by the thread which becomes responsible for I/O on this page to
1393 indicate that it takes the responsibility. */
1394 void take() {
1397 }
1398 };
1399 /** Tracks which thread is responsible for I/O on this page. */
1401
1402 public:
1403 /** Checks if there is any thread responsible for I/O on this page now.
1404 @return true iff there is a thread responsible for I/O on this page.*/
1407 }
1408
1409 /** Checks if the current thread is responsible for I/O on this page now.
1410 @return true iff the current thread is responsible for I/O on this page.*/
1413 }
1414
1415 /** Called by the thread responsible for I/O on this page to release its
1416 responsibility. */
1418
1419 /** Called by the thread which becomes responsible for I/O on this page to
1420 indicate that it takes the responsibility. */
1423 io_fix.load(std::memory_order_relaxed) == BUF_IO_WRITE ||
1424 io_fix.load(std::memory_order_relaxed) == BUF_IO_READ);
1426 }
1427#endif /* UNIV_DEBUG */
1428 private:
1429 /** Retrieves a value of io_fix without requiring or acquiring any latches.
1430 Note that this implies that the value might be stale unless caller establishes
1431 happens-before relation in some other way.
1432 This is a low-level function which shouldn't be used directly, but
1433 rather via wrapper methods which check if proper latches are taken or via one
1434 of the many `was_io_fix_something()` methods with name explicitly warning the
1435 developer about the uncertainty involved.
1436 @return the value of io_fix at some moment "during" the call */
1439 return io_fix.load(std::memory_order_relaxed);
1440 }
1441
1442 public:
1443 /** This is called only when having full ownership of the page object and no
1444 other thread can reach it. This currently happens during buf_pool_create(),
1445 buf_pool_resize() (which latch quite a lot) or from fil_tablespace_iterate()
1446 which creates a fake, private block which is not really a part of the buffer
1447 pool.
1448 Therefore we allow this function to set io_fix without checking for any
1449 latches.
1450 Please use set_io_fix(BUF_IO_NONE) to change state in a regular situation. */
1452 io_fix.store(BUF_IO_NONE, std::memory_order_relaxed);
1453 /* This is only needed because places which call init_io_fix() do not call
1454 buf_page_t's constructor */
1456 }
1457
1458 /** This is called only when having full ownership of the page object and no
1459 other thread can reach it. This currently happens during buf_page_init_low()
1460 under buf_page_get_mutex(this), on a previously initialized page for reuse,
1461 yet should be treated as initialization of the field, not a state transition.
1462 Please use set_io_fix(BUF_IO_NONE) to change state in a regular situation. */
1464 ut_ad(io_fix.load(std::memory_order_relaxed) == BUF_IO_NONE);
1466 io_fix.store(BUF_IO_NONE, std::memory_order_relaxed);
1467 }
1468
1469 /** Sets io_fix to specified value.
1470 Assumes the caller holds buf_page_get_mutex(this).
1471 Might require additional latches depending on particular state transition.
1472 Calls take_io_responsibility() or release_io_responsibility() as needed.
1473 @see Buf_io_fix_latching_rules for specific rules. */
1475
1476 /** Retrieves the current value of io_fix.
1477 Assumes the caller holds buf_page_get_mutex(this).
1478 @return the current value of io_fix */
1481 return get_io_fix_snapshot();
1482 }
1483
1484 /** Checks if the current value of io_fix is BUF_IO_WRITE.
1485 Assumes the caller holds buf_page_get_mutex(this) or some other latches which
1486 prevent state transition from/to BUF_IO_WRITE.
1487 @see Buf_io_fix_latching_rules for specific rules.
1488 @return true iff the current value of io_fix == BUF_IO_WRITE */
1489 bool is_io_fix_write() const;
1490
1491 /** Checks if the current value of io_fix is BUF_IO_READ.
1492 Assumes the caller holds buf_page_get_mutex(this) or some other latches which
1493 prevent state transition from/to BUF_IO_READ.
1494 @see Buf_io_fix_latching_rules for specific rules.
1495 @return true iff the current value of io_fix == BUF_IO_READ */
1496 bool is_io_fix_read() const;
1497
1498 /** Assuming that io_fix is either BUF_IO_READ or BUF_IO_WRITE determines
1499 which of the two it is. Additionally it assumes the caller holds
1500 buf_page_get_mutex(this) or some other latches which prevent state transition
1501 from BUF_IO_READ or from BUF_IO_WRITE to another state.
1502 @see Buf_io_fix_latching_rules for specific rules.
1503 @return true iff the current value of io_fix == BUF_IO_READ */
1505
1506 /** Checks if io_fix is BUF_IO_READ without requiring or acquiring any
1507 latches.
1508 Note that this implies calling this function twice in a row could produce
1509 different results.
1510 @return true iff io_fix equal to BUF_IO_READ was noticed*/
1512
1513 /** Checks if io_fix is BUF_IO_FIX or BUF_IO_READ or BUF_IO_WRITE without
1514 requiring or acquiring any latches.
1515 Note that this implies calling this function twice in a row could produce
1516 different results.
1517 @return true iff io_fix not equal to BUF_IO_NONE was noticed */
1518 bool was_io_fixed() const { return get_io_fix_snapshot() != BUF_IO_NONE; }
1519
1520 /** Checks if io_fix is BUF_IO_NONE without requiring or acquiring any
1521 latches.
1522 Note that this implies calling this function twice in a row could produce
1523 different results.
1524 Please, prefer this function over !was_io_fixed() to avoid the misleading
1525 interpretation as "not(Exists time such that io_fix(time))", while in fact we
1526 want and get "Exists time such that !io_fix(time)".
1527 @return true iff io_fix equal to BUF_IO_NONE was noticed */
1529
1530 /** Block state. @see buf_page_in_file */
1532
1533 /** If this block is currently being flushed to disk, this tells
1534 the flush_type. @see buf_flush_t */
1536
1537 /** Index number of the buffer pool that this block belongs to */
1539
1540 static_assert(MAX_BUFFER_POOLS <= 64,
1541 "MAX_BUFFER_POOLS > 64; redefine buf_pool_index");
1542
1543 /** @} */
1544#ifndef UNIV_HOTBACKUP
1545 /** Node used in chaining to buf_pool->page_hash or buf_pool->zip_hash */
1547#endif /* !UNIV_HOTBACKUP */
1548
1549 /** @name Page flushing fields
1550 All these are protected by buf_pool->mutex. */
1551 /** @{ */
1552
1553 /** Based on state, this is a list node, protected by the corresponding list
1554 mutex, in one of the following lists in buf_pool:
1555
1556 - BUF_BLOCK_NOT_USED: free, withdraw
1557 - BUF_BLOCK_FILE_PAGE: flush_list
1558 - BUF_BLOCK_ZIP_DIRTY: flush_list
1559 - BUF_BLOCK_ZIP_PAGE: zip_clean
1560
1561 The node pointers are protected by the corresponding list mutex.
1562
1563 The contents of the list node is undefined if !in_flush_list &&
1564 state == BUF_BLOCK_FILE_PAGE, or if state is one of
1565 BUF_BLOCK_MEMORY,
1566 BUF_BLOCK_REMOVE_HASH or
1567 BUF_BLOCK_READY_IN_USE. */
1568
1570
1571 private:
1572 /** The flush LSN, LSN when this page was written to the redo log. For
1573 non redo logged pages this is set using: buf_flush_borrow_lsn() */
1575
1576 /** log sequence number of the youngest modification to this block, zero
1577 if not modified. Protected by block mutex */
1579
1580 public:
1581 /** log sequence number of the START of the log entry written of the oldest
1582 modification to this block which has not yet been flushed on disk; zero if
1583 all modifications are on disk. Writes to this field must be covered by both
1584 block->mutex and buf_pool->flush_list_mutex. Hence reads can happen while
1585 holding any one of the two mutexes */
1586 /** @} */
1587
1588 /** @name LRU replacement algorithm fields
1589 These fields are protected by both buf_pool->LRU_list_mutex and the
1590 block mutex. */
1591 /** @{ */
1592
1593 /** node of the LRU list */
1595
1596 /** compressed page; zip.data (but not the data it points to) is
1597 protected by buf_pool->zip_mutex; state == BUF_BLOCK_ZIP_PAGE and
1598 zip.data == NULL means an active buf_pool->watch */
1600
1601#ifndef UNIV_HOTBACKUP
1602 /** Flush observer instance. */
1604
1605 /** Tablespace instance that this page belongs to. */
1607
1608 /** The value of buf_pool->freed_page_clock when this block was the last
1609 time put to the head of the LRU list; a thread is allowed to read this
1610 for heuristic purposes without holding any mutex or latch */
1612
1613 /** @} */
1614 /** Version of fil_space_t when the page was updated. It can also be viewed as
1615 the truncation number. */
1616 uint32_t m_version{};
1617
1618 /** Time of first access, or 0 if the block was never accessed in the
1619 buffer pool. Protected by block mutex */
1620 std::chrono::steady_clock::time_point access_time;
1621
1622 private:
1623 /** Double write instance ordinal value during writes. This is used
1624 by IO completion (writes) to select the double write instance.*/
1625 uint16_t m_dblwr_id{};
1626
1627 public:
1628 /** true if the block is in the old blocks in buf_pool->LRU_old */
1629 bool old;
1630
1631#ifdef UNIV_DEBUG
1632 /** This is set to true when fsp frees a page in buffer pool;
1633 protected by buf_pool->zip_mutex or buf_block_t::mutex. */
1635
1636 /** true if in buf_pool->flush_list; when buf_pool->flush_list_mutex
1637 is free, the following should hold:
1638 in_flush_list == (state == BUF_BLOCK_FILE_PAGE ||
1639 state == BUF_BLOCK_ZIP_DIRTY)
1640 Writes to this field must be covered by both buf_pool->flush_list_mutex
1641 and block->mutex. Hence reads can happen while holding any one of the
1642 two mutexes */
1644
1645 /** true if in buf_pool->free; when buf_pool->free_list_mutex is free, the
1646 following should hold: in_free_list == (state == BUF_BLOCK_NOT_USED) */
1648
1649 /** true if the page is in the LRU list; used in debugging */
1651
1652 /** true if in buf_pool->page_hash */
1654
1655 /** true if in buf_pool->zip_hash */
1657#endif /* UNIV_DEBUG */
1658
1659#endif /* !UNIV_HOTBACKUP */
1660};
1661
1662/** The buffer control block structure */
1663
1665 /** @name General fields */
1666 /** @{ */
1667
1668 /** page information; this must be the first field, so
1669 that buf_pool->page_hash can point to buf_page_t or buf_block_t */
1671
1672#ifndef UNIV_HOTBACKUP
1673 /** read-write lock of the buffer frame */
1675
1676#endif /* UNIV_HOTBACKUP */
1677
1678 /** pointer to buffer frame which is of size UNIV_PAGE_SIZE, and aligned
1679 to an address divisible by UNIV_PAGE_SIZE */
1680 byte *frame;
1681
1682 /** node of the decompressed LRU list; a block is in the unzip_LRU list if
1683 page.state == BUF_BLOCK_FILE_PAGE and page.zip.data != NULL. Protected by
1684 both LRU_list_mutex and the block mutex. */
1686#ifdef UNIV_DEBUG
1687
1688 /** true if the page is in the decompressed LRU list; used in debugging */
1690
1692#endif /* UNIV_DEBUG */
1693
1694 /** @} */
1695
1696 /** @name Hash search fields (unprotected)
1697 NOTE that these fields are NOT protected by any semaphore! */
1698 /** @{ */
1699
1700 /** Counter which controls building of a new hash index for the page */
1701 std::atomic<uint32_t> n_hash_helps;
1702
1703 /** Recommended prefix length for hash search: number of bytes in an
1704 incomplete last field */
1705 std::atomic<uint32_t> n_bytes;
1706
1707 /** Recommended prefix length for hash search: number of full fields */
1708 std::atomic<uint32_t> n_fields;
1709
1710 /** true or false, depending on whether the leftmost record of several
1711 records with the same prefix should be indexed in the hash index */
1712 std::atomic<bool> left_side;
1713 /** @} */
1714
1715 /** @name Hash search fields
1716 These 5 fields may only be modified when:
1717 we are holding the appropriate x-latch in btr_search_latches[], and
1718 one of the following holds:
1719 (1) the block state is BUF_BLOCK_FILE_PAGE, and
1720 we are holding an s-latch or x-latch on buf_block_t::lock, or
1721 (2) buf_block_t::buf_fix_count == 0, or
1722 (3) the block state is BUF_BLOCK_REMOVE_HASH.
1723
1724 An exception to this is when we init or create a page
1725 in the buffer pool in buf0buf.cc.
1726
1727 Another exception for buf_pool_clear_hash_index() is that
1728 assigning block->index = NULL (and block->n_pointers = 0)
1729 is allowed whenever btr_search_own_all(RW_LOCK_X).
1730
1731 Another exception is that ha_insert_for_hash_func() may
1732 decrement n_pointers without holding the appropriate latch
1733 in btr_search_latches[]. Thus, n_pointers must be
1734 protected by atomic memory access.
1735
1736 This implies that the fields may be read without race
1737 condition whenever any of the following hold:
1738 - the btr_search_latches[] s-latch or x-latch is being held, or
1739 - the block state is not BUF_BLOCK_FILE_PAGE or BUF_BLOCK_REMOVE_HASH,
1740 and holding some latch prevents the state from changing to that.
1741
1742 Some use of assert_block_ahi_empty() or assert_block_ahi_valid()
1743 is prone to race conditions while buf_pool_clear_hash_index() is
1744 executing (the adaptive hash index is being disabled). Such use
1745 is explicitly commented. */
1746
1747 /** @{ */
1748
1749#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
1750 /** used in debugging: the number of pointers in the adaptive hash index
1751 pointing to this frame; protected by atomic memory access or
1752 btr_search_own_all(). */
1753 std::atomic<ulint> n_pointers;
1754
1755#define assert_block_ahi_valid(block) \
1756 ut_a((block)->index || (block)->n_pointers.load() == 0)
1757#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
1758#define assert_block_ahi_empty(block) /* nothing */
1759#define assert_block_ahi_empty_on_init(block) /* nothing */
1760#define assert_block_ahi_valid(block) /* nothing */
1761#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
1762
1763 /** prefix length for hash indexing: number of full fields */
1765
1766 /** number of bytes in hash indexing */
1768
1769 /** true or false in hash indexing */
1771
1772 /** true if block has been made dirty without acquiring X/SX latch as the
1773 block belongs to temporary tablespace and block is always accessed by a
1774 single thread. */
1776
1777 /** Index for which the adaptive hash index has been created, or NULL if
1778 the page does not exist in the index. Note that it does not guarantee that
1779 the index is complete, though: there may have been hash collisions, record
1780 deletions, etc. */
1782
1783 /** @} */
1784#ifndef UNIV_HOTBACKUP
1785#ifdef UNIV_DEBUG
1786 /** @name Debug fields */
1787 /** @{ */
1788 /** In the debug version, each thread which bufferfixes the block acquires
1789 an s-latch here; so we can use the debug utilities in sync0rw */
1791 /** @} */
1792#endif /* UNIV_DEBUG */
1793#endif /* !UNIV_HOTBACKUP */
1794
1795 /** @name Optimistic search field */
1796 /** @{ */
1797
1798 /** This clock is incremented every time a pointer to a record on the page
1799 may become obsolete; this is used in the optimistic cursor positioning: if
1800 the modify clock has not changed, we know that the pointer is still valid;
1801 this field may be changed if the thread (1) owns the LRU list mutex and the
1802 page is not bufferfixed, or (2) the thread has an x-latch on the block,
1803 or (3) the block must belong to an intrinsic table */
1805
1806 /** @} */
1807
1808 /** mutex protecting this block: state (also protected by the buffer
1809 pool mutex), io_fix, buf_fix_count, and accessed; we introduce this
1810 new mutex in InnoDB-5.1 to relieve contention on the buffer pool mutex */
1812
1813 /** Get the modified clock (version) value.
1814 @param[in] single_threaded Thread can only be written to or read by a
1815 single thread
1816 @return the modified clock vlue. */
1817 uint64_t get_modify_clock(IF_DEBUG(bool single_threaded)) const noexcept {
1818#if defined(UNIV_DEBUG) && !defined(UNIV_LIBRARY) && !defined(UNIV_HOTBACKUP)
1819 /* No block latch is acquired when blocks access is guaranteed to be
1820 in single threaded mode. */
1822 ut_ad(single_threaded || rw_lock_own_flagged(&lock, mode));
1823#endif /* UNIV_DEBUG && !UNIV_LIBRARY */
1824
1825 return modify_clock;
1826 }
1827
  /** Get the page id (space id and page number) of the current buffer block.
  @return reference to the page id of the current buffer block. */
  const page_id_t &get_page_id() const { return page.id; }
1831
1832 /** Get the page number of the current buffer block.
1833 @return page number of the current buffer block. */
1834 page_no_t get_page_no() const { return (page.id.page_no()); }
1835
1836 /** Get the next page number of the current buffer block.
1837 @return next page number of the current buffer block. */
1840 }
1841
1842 /** Get the prev page number of the current buffer block.
1843 @return prev page number of the current buffer block. */
1846 }
1847
1848 /** Get the page type of the current buffer block.
1849 @return page type of the current buffer block. */
1852 }
1853
1854 /** Get the page type of the current buffer block as string.
1855 @return page type of the current buffer block as string. */
1856 [[nodiscard]] const char *get_page_type_str() const noexcept;
1857
1858 /** Gets the compressed page descriptor corresponding to an uncompressed page
1859 if applicable.
1860 @return page descriptor or nullptr. */
1862 return page.zip.data != nullptr ? &page.zip : nullptr;
1863 }
1864
1865 /** Const version.
1866 @return page descriptor or nullptr. */
1867 page_zip_des_t const *get_page_zip() const noexcept {
1868 return page.zip.data != nullptr ? &page.zip : nullptr;
1869 }
1870};
1871
1872/** Check if a buf_block_t object is in a valid state
1873@param block buffer block
1874@return true if valid */
1876 return buf_block_get_state(block) >= BUF_BLOCK_NOT_USED &&
1878}
1879
1880/** Compute the hash value for blocks in buf_pool->zip_hash. */
1881/** @{ */
1882static inline uint64_t buf_pool_hash_zip_frame(void *ptr) {
1883 return ut::hash_uint64(reinterpret_cast<uintptr_t>(ptr) >>
1885}
1886static inline uint64_t buf_pool_hash_zip(buf_block_t *b) {
1887 return buf_pool_hash_zip_frame(b->frame);
1888}
1889/** @} */
1890
1891/** A "Hazard Pointer" class used to iterate over page lists
1892inside the buffer pool. A hazard pointer is a buf_page_t pointer
1893which we intend to iterate over next and we want it remain valid
1894even after we release the buffer pool mutex. */
1896 public:
  /** Constructor
  @param buf_pool buffer pool instance
  @param mutex mutex that is protecting the hp; stored only in debug builds
  (via IF_DEBUG) where it is used for ownership assertions. */
  HazardPointer(const buf_pool_t *buf_pool, const ib_mutex_t *mutex)
      : m_buf_pool(buf_pool) IF_DEBUG(, m_mutex(mutex)), m_hp() {}
1902
1903 /** Destructor */
1904 virtual ~HazardPointer() = default;
1905
1906 /** Get current value */
1907 buf_page_t *get() const {
1909 return (m_hp);
1910 }
1911
1912 /** Set current value
1913 @param bpage buffer block to be set as hp */
1914 void set(buf_page_t *bpage);
1915
1916 /** Checks if a bpage is the hp
1917 @param bpage buffer block to be compared
1918 @return true if it is hp */
1919 bool is_hp(const buf_page_t *bpage);
1920
1921 /** Adjust the value of hp. This happens when some
1922 other thread working on the same list attempts to
1923 remove the hp from the list. Must be implemented
1924 by the derived classes.
1925 @param bpage buffer block to be compared */
1926 virtual void adjust(const buf_page_t *bpage) = 0;
1927
1928 /** Adjust the value of hp for moving. This happens
1929 when some other thread working on the same list
1930 attempts to relocate the hp of the page.
1931 @param bpage buffer block to be compared
1932 @param dpage buffer block to be moved to */
1933 void move(const buf_page_t *bpage, buf_page_t *dpage);
1934
1935 protected:
1936 /** Disable copying */
1939
1940 /** Buffer pool instance */
1942
1943#ifdef UNIV_DEBUG
1944 /** mutex that protects access to the m_hp. */
1945 const ib_mutex_t *m_mutex;
1946#endif /* UNIV_DEBUG */
1947
1948 /** hazard pointer. */
1950};
1951
1952#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
1954 ut_a((block)->n_pointers.load() == 0);
1955}
1956
1958 UNIV_MEM_VALID(&block->n_pointers, sizeof(block)->n_pointers);
1960}
1961#endif
1962
/** Class implementing buf_pool->flush_list hazard pointer. Keeps the next
page of a flush-list batch scan valid even if another thread removes that
page from the list (see HazardPointer). */
class FlushHp : public HazardPointer {
 public:
  /** Constructor
  @param buf_pool buffer pool instance
  @param mutex mutex that is protecting the hp. */
  FlushHp(const buf_pool_t *buf_pool, const ib_mutex_t *mutex)
      : HazardPointer(buf_pool, mutex) {}

  /** Destructor */
  ~FlushHp() override = default;

  /** Adjust the value of hp. This happens when some
  other thread working on the same list attempts to
  remove the hp from the list.
  @param bpage buffer block to be compared */
  void adjust(const buf_page_t *bpage) override;
};
1981
/** Class implementing buf_pool->LRU hazard pointer. Keeps the next page of
an LRU-list batch scan valid even if another thread removes that page from
the list (see HazardPointer). */
class LRUHp : public HazardPointer {
 public:
  /** Constructor
  @param buf_pool buffer pool instance
  @param mutex mutex that is protecting the hp. */
  LRUHp(const buf_pool_t *buf_pool, const ib_mutex_t *mutex)
      : HazardPointer(buf_pool, mutex) {}

  /** Destructor */
  ~LRUHp() override = default;

  /** Adjust the value of hp. This happens when some
  other thread working on the same list attempts to
  remove the hp from the list.
  @param bpage buffer block to be compared */
  void adjust(const buf_page_t *bpage) override;
};
2000
/** Special purpose iterators to be used when scanning the LRU list.
The idea is that when one thread finishes the scan it leaves the
itr in that position and the other thread can start scan from
there. Inherits the hazard-pointer behavior of LRUHp, so the saved
position stays valid across list removals by other threads. */
class LRUItr : public LRUHp {
 public:
  /** Constructor
  @param buf_pool buffer pool instance
  @param mutex mutex that is protecting the hp. */
  LRUItr(const buf_pool_t *buf_pool, const ib_mutex_t *mutex)
      : LRUHp(buf_pool, mutex) {}

  /** Destructor */
  ~LRUItr() override = default;

  /** Selects from where to start a scan. If we have scanned
  too deep into the LRU list it resets the value to the tail
  of the LRU list.
  @return buf_page_t from where to start scan. */
  buf_page_t *start();
};
2022
2023/** Struct that is embedded in the free zip blocks */
2025 union {
2026 ulint size; /*!< size of the block */
2028 /*!< stamp[FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID]
2029 == BUF_BUDDY_FREE_STAMP denotes a free
2030 block. If the space_id field of buddy
2031 block != BUF_BUDDY_FREE_STAMP, the block
2032 is not in any zip_free list. If the
2033 space_id is BUF_BUDDY_FREE_STAMP then
2034 stamp[0] will contain the
2035 buddy block size. */
2037
2038 buf_page_t bpage; /*!< Embedded bpage descriptor */
2040 /*!< Node of zip_free list */
2041};
2042
2043/** @brief The buffer pool statistics structure. */
2046
2047 /** Number of page gets performed; also successful searches through the
2048 adaptive hash index are counted as page gets; this field is NOT protected
2049 by the buffer pool mutex */
2051
2052 /** Number of read operations. */
2053 std::atomic<uint64_t> n_pages_read;
2054
2055 /** Number of write operations. */
2056 std::atomic<uint64_t> n_pages_written;
2057
2058 /** number of pages created in the pool with no read. */
2059 std::atomic<uint64_t> n_pages_created;
2060
2061 /** Number of pages read in as part of random read ahead. */
2062 std::atomic<uint64_t> n_ra_pages_read_rnd;
2063
2064 /** Number of pages read in as part of read ahead. */
2065 std::atomic<uint64_t> n_ra_pages_read;
2066
2067 /** Number of read ahead pages that are evicted without being accessed.
2068 Protected by LRU_list_mutex. */
2070
2071 /** Number of pages made young, in calls to buf_LRU_make_block_young().
2072 Protected by LRU_list_mutex. */
2074
2075 /** Number of pages not made young because the first access was not long
2076 enough ago, in buf_page_peek_if_too_old(). Not protected. */
2078
2079 /** LRU size in bytes. Protected by LRU_list_mutex. */
2080 uint64_t LRU_bytes;
2081
2082 /** Flush_list size in bytes. Protected by flush_list_mutex */
2084
  /** Copy the statistics from src into dst.
  NOTE: each counter is loaded and stored individually, so the result is
  not an atomic snapshot of the whole structure — if src is concurrently
  updated, dst may mix values taken at slightly different instants.
  @param[out] dst destination statistics object
  @param[in] src source statistics object */
  static void copy(buf_pool_stat_t &dst, const buf_pool_stat_t &src) noexcept {
    /* Page-gets counter is copied via its own Counter helper. */
    Counter::copy(dst.m_n_page_gets, src.m_n_page_gets);

    dst.n_pages_read.store(src.n_pages_read.load());

    dst.n_pages_written.store(src.n_pages_written.load());

    dst.n_pages_created.store(src.n_pages_created.load());

    dst.n_ra_pages_read_rnd.store(src.n_ra_pages_read_rnd.load());

    dst.n_ra_pages_read.store(src.n_ra_pages_read.load());

    /* The remaining fields are plain (non-atomic); per their declarations
    they are protected by buffer pool mutexes at the update sites and are
    copied here as-is. */
    dst.n_ra_pages_evicted = src.n_ra_pages_evicted;

    dst.n_pages_made_young = src.n_pages_made_young;

    dst.n_pages_not_made_young = src.n_pages_not_made_young;

    dst.LRU_bytes = src.LRU_bytes;

    dst.flush_list_bytes = src.flush_list_bytes;
  }
2108
2109 void reset() {
2111
2112 n_pages_read = 0;
2113 n_pages_written = 0;
2114 n_pages_created = 0;
2116 n_ra_pages_read = 0;
2120 LRU_bytes = 0;
2121 flush_list_bytes = 0;
2122 }
2123};
2124
2125/** Statistics of buddy blocks of a given size. */
2127 /** Number of blocks allocated from the buddy system. */
2128 std::atomic<ulint> used;
2129 /** Number of blocks relocated by the buddy system.
2130 Protected by buf_pool zip_free_mutex. */
2131 uint64_t relocated;
2132 /** Total duration of block relocations.
2133 Protected by buf_pool zip_free_mutex. */
2134 std::chrono::steady_clock::duration relocated_duration;
2135
2136 struct snapshot_t {
2138 uint64_t relocated;
2139 std::chrono::steady_clock::duration relocated_duration;
2140 };
2141
2143 return {used.load(), relocated, relocated_duration};
2144 }
2145};
2146
2147/** @brief The buffer pool structure.
2148
2149NOTE! The definition appears here only for other modules of this
2150directory (buf) to see it. Do not use from outside! */
2151
2153 /** @name General fields */
2154 /** @{ */
2155 /** protects (de)allocation of chunks:
2156 - changes to chunks, n_chunks are performed while holding this latch,
2157 - reading buf_pool_should_madvise requires holding this latch for any
2158 buf_pool_t
2159 - writing to buf_pool_should_madvise requires holding these latches
2160 for all buf_pool_t-s */
2162
2163 /** LRU list mutex */
2165
2166 /** free and withdraw list mutex */
2168
2169 /** buddy allocator mutex */
2171
2172 /** zip_hash mutex */
2174
2175 /** Flush state protection mutex */
2177
2178 /** Zip mutex of this buffer pool instance, protects compressed only pages (of
2179 type buf_page_t, not buf_block_t */
2181
2182 /** Array index of this buffer pool instance */
2184
2185 /** Current pool size in bytes */
2187
2188 /** Reserve this much of the buffer pool for "old" blocks */
2190#ifdef UNIV_DEBUG
2191 /** Number of frames allocated from the buffer pool to the buddy system.
2192 Protected by zip_hash_mutex. */
2194#endif
2195
2196 /** Number of buffer pool chunks */
2197 volatile ulint n_chunks;
2198
2199 /** New number of buffer pool chunks */
2201
2202 /** buffer pool chunks */
2204
2205 /** old buffer pool chunks to be freed after resizing buffer pool */
2207
2208 /** Current pool size in pages */
2210
2211 /** Previous pool size in pages */
2213
2214 /** Size in pages of the area which the read-ahead algorithms read
2215 if invoked */
2217
2218 /** Hash table of buf_page_t or buf_block_t file pages, buf_page_in_file() ==
2219 true, indexed by (space_id, offset). page_hash is protected by an array of
2220 mutexes. */
2222
2223 /** Hash table of buf_block_t blocks whose frames are allocated to the zip
2224 buddy system, indexed by block->frame */
2226
2227 /** Number of pending read operations. Accessed atomically */
2228 std::atomic<ulint> n_pend_reads;
2229
2230 /** number of pending decompressions. Accessed atomically. */
2231 std::atomic<ulint> n_pend_unzip;
2232
2233 /** when buf_print_io was last time called. Accesses not protected. */
2234 std::chrono::steady_clock::time_point last_printout_time;
2235
2236 /** Statistics of buddy system, indexed by block size. Protected by zip_free
2237 mutex, except for the used field, which is also accessed atomically */
2239
2240 /** Current statistics */
2242
2243 /** Old statistics */
2245
2246 /** @} */
2247
2248 /** @name Page flushing algorithm fields */
2249
2250 /** @{ */
2251
2252 /** Mutex protecting the flush list access. This mutex protects flush_list,
2253 flush_rbt and bpage::list pointers when the bpage is on flush_list. It also
2254 protects writes to bpage::oldest_modification and flush_list_hp */
2256
2257 /** "Hazard pointer" used during scan of flush_list while doing flush list
2258 batch. Protected by flush_list_mutex */
2260
2261 /** Entry pointer to scan the oldest page except for system temporary */
2263
2264 /** Base node of the modified block list */
2266
2267 /** This is true when a flush of the given type is being initialized.
2268 Protected by flush_state_mutex. */
2270
2271 /** This is the number of pending writes in the given flush type. Protected
2272 by flush_state_mutex. */
2274
2275 /** This is in the set state when there is no flush batch of the given type
2276 running. Protected by flush_state_mutex. */
2278
2279 /** A red-black tree is used exclusively during recovery to speed up
2280 insertions in the flush_list. This tree contains blocks in order of
2281 oldest_modification LSN and is kept in sync with the flush_list. Each
2282 member of the tree MUST also be on the flush_list. This tree is relevant
2283 only in recovery and is set to NULL once the recovery is over. Protected
2284 by flush_list_mutex */
2286
2287 /** A sequence number used to count the number of buffer blocks removed from
2288 the end of the LRU list; NOTE that this counter may wrap around at 4
2289 billion! A thread is allowed to read this for heuristic purposes without
2290 holding any mutex or latch. For non-heuristic purposes protected by
2291 LRU_list_mutex */
2293
2294 /** Set to false when an LRU scan for free block fails. This flag is used to
2295 avoid repeated scans of LRU list when we know that there is no free block
2296 available in the scan depth for eviction. Set to true whenever we flush a
2297 batch from the buffer pool. Accessed protected by memory barriers. */
2299
2300 /** Page Tracking start LSN. */
2302
2303 /** Maximum LSN for which write io has already started. */
2305
2306 /** @} */
2307
2308 /** @name LRU replacement algorithm fields */
2309 /** @{ */
2310
2311 /** Base node of the free block list */
2313
2314 /** base node of the withdraw block list. It is only used during shrinking
2315 buffer pool size, not to reuse the blocks will be removed. Protected by
2316 free_list_mutex */
2318
2319 /** Target length of withdraw block list, when withdrawing */
2321
2322 /** "hazard pointer" used during scan of LRU while doing
2323 LRU list batch. Protected by buf_pool::LRU_list_mutex */
2325
2326 /** Iterator used to scan the LRU list when searching for
2327 replaceable victim. Protected by buf_pool::LRU_list_mutex. */
2329
2330 /** Iterator used to scan the LRU list when searching for
2331 single page flushing victim. Protected by buf_pool::LRU_list_mutex. */
2333
2334 /** Base node of the LRU list */
2336
2337 /** Pointer to the about LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV oldest blocks in
2338 the LRU list; NULL if LRU length less than BUF_LRU_OLD_MIN_LEN; NOTE: when
2339 LRU_old != NULL, its length should always equal LRU_old_len */
2341
2342 /** Length of the LRU list from the block to which LRU_old points onward,
2343 including that block; see buf0lru.cc for the restrictions on this value; 0
2344 if LRU_old == NULL; NOTE: LRU_old_len must be adjusted whenever LRU_old
2345 shrinks or grows! */
2347
2348 /** Base node of the unzip_LRU list. The list is protected by the
2349 LRU_list_mutex. */
2350 UT_LIST_BASE_NODE_T(buf_block_t, unzip_LRU) unzip_LRU;
2351
2352 /** @} */
2353 /** @name Buddy allocator fields
2354 The buddy allocator is used for allocating compressed page
2355 frames and buf_page_t descriptors of blocks that exist
2356 in the buffer pool only in compressed form. */
2357 /** @{ */
2358#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
2359 /** Unmodified compressed pages */
2361#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
2362
2363 /** Buddy free lists */
2365
2366 /** Sentinel records for buffer pool watches. Scanning the array is protected
2367 by taking all page_hash latches in X. Updating or reading an individual
2368 watch page is protected by a corresponding individual page_hash latch. */
2370
2371 /** A wrapper for buf_pool_t::allocator.alocate_large which also advices the
2372 OS that this chunk should not be dumped to a core file if that was requested.
2373 Emits a warning to the log and disables @@global.core_file if advising was
2374 requested but could not be performed, but still return true as the allocation
2375 itself succeeded.
2376 @param[in] mem_size number of bytes to allocate
2377 @param[in,out] chunk mem and mem_pfx fields of this chunk will be updated
2378 to contain information about allocated memory region
2379 @return true iff allocated successfully */
2380 bool allocate_chunk(ulonglong mem_size, buf_chunk_t *chunk);
2381
2382 /** A wrapper for buf_pool_t::allocator.deallocate_large which also advices
2383 the OS that this chunk can be dumped to a core file.
2384 Emits a warning to the log and disables @@global.core_file if advising was
2385 requested but could not be performed.
2386 @param[in] chunk mem and mem_pfx fields of this chunk will be used to
2387 locate the memory region to free */
2388 void deallocate_chunk(buf_chunk_t *chunk);
2389
2390 /** Advices the OS that all chunks in this buffer pool instance can be dumped
2391 to a core file.
2392 Emits a warning to the log if could not succeed.
2393 @return true iff succeeded, false if no OS support or failed */
2394 bool madvise_dump();
2395
2396 /** Advices the OS that all chunks in this buffer pool instance should not
2397 be dumped to a core file.
2398 Emits a warning to the log if could not succeed.
2399 @return true iff succeeded, false if no OS support or failed */
2400 bool madvise_dont_dump();
2401
2402 static_assert(BUF_BUDDY_LOW <= UNIV_ZIP_SIZE_MIN,
2403 "BUF_BUDDY_LOW > UNIV_ZIP_SIZE_MIN");
2404 /** @} */
2405};
2406
2407/** Print the given buf_pool_t object.
2408@param[in,out] out the output stream
2409@param[in] buf_pool the buf_pool_t object to be printed
2410@return the output stream */
2411std::ostream &operator<<(std::ostream &out, const buf_pool_t &buf_pool);
2412
2413/** @name Accessors for buffer pool mutexes
2414Use these instead of accessing buffer pool mutexes directly. */
2415/** @{ */
2416
2417#ifndef UNIV_HOTBACKUP
/** Test if flush list mutex is owned. */
#define buf_flush_list_mutex_own(b) mutex_own(&(b)->flush_list_mutex)

/** Acquire the flush list mutex. */
#define buf_flush_list_mutex_enter(b)    \
  do {                                   \
    mutex_enter(&(b)->flush_list_mutex); \
  } while (0)

/** Release the flush list mutex. */
#define buf_flush_list_mutex_exit(b)    \
  do {                                  \
    mutex_exit(&(b)->flush_list_mutex); \
  } while (0)

/** Acquire the block->mutex. */
#define buf_page_mutex_enter(b) \
  do {                          \
    mutex_enter(&(b)->mutex);   \
  } while (0)

/** Release the block->mutex.
NOTE(review): unlike buf_page_mutex_enter() above, this calls the mutex
member function exit() directly instead of a mutex_exit() wrapper —
presumably intentional (e.g. to bypass instrumentation); confirm before
normalizing. */
#define buf_page_mutex_exit(b) \
  do {                         \
    (b)->mutex.exit();         \
  } while (0)
2442
2443/** Get appropriate page_hash_lock. */
2445 const page_id_t page_id) {
2446 return hash_get_lock(buf_pool->page_hash, page_id.hash());
2447}
2448
2449/** If not appropriate page_hash_lock, relock until appropriate. */
2451 const buf_pool_t *buf_pool,
2452 const page_id_t page_id) {
2453 return hash_lock_s_confirm(hash_lock, buf_pool->page_hash, page_id.hash());
2454}
2455
2457 buf_pool_t *buf_pool,
2458 const page_id_t &page_id) {
2459 return hash_lock_x_confirm(hash_lock, buf_pool->page_hash, page_id.hash());
2460}
2461#endif /* !UNIV_HOTBACKUP */
2462
2463#if defined(UNIV_DEBUG) && !defined(UNIV_HOTBACKUP)
2464/** Test if page_hash lock is held in s-mode. */
2466 const buf_page_t *bpage) {
2467 return rw_lock_own(buf_page_hash_lock_get(buf_pool, bpage->id), RW_LOCK_S);
2468}
2469
2470/** Test if page_hash lock is held in x-mode. */
2472 const buf_page_t *bpage) {
2473 return rw_lock_own(buf_page_hash_lock_get((buf_pool), (bpage)->id),
2474 RW_LOCK_X);
2475}
2476
2477/** Test if page_hash lock is held in x or s-mode. */
2478inline bool buf_page_hash_lock_held_s_or_x(const buf_pool_t *buf_pool,
2479 const buf_page_t *bpage) {
2480 return buf_page_hash_lock_held_s(buf_pool, bpage) ||
2481 buf_page_hash_lock_held_x(buf_pool, bpage);
2482}
2483
2484inline bool buf_block_hash_lock_held_s(const buf_pool_t *buf_pool,
2485 const buf_block_t *block) {
2486 return buf_page_hash_lock_held_s(buf_pool, &block->page);
2487}
2488
2489inline bool buf_block_hash_lock_held_x(const buf_pool_t *buf_pool,
2490 const buf_block_t *block) {
2491 return buf_page_hash_lock_held_x(buf_pool, &block->page);
2492}
2493
2495 const buf_block_t *block) {
2496 return buf_page_hash_lock_held_s_or_x(buf_pool, &block->page);
2497}
2498#else /* UNIV_DEBUG && !UNIV_HOTBACKUP */
2499#define buf_page_hash_lock_held_s(b, p) (true)
2500#define buf_page_hash_lock_held_x(b, p) (true)
2501#define buf_page_hash_lock_held_s_or_x(b, p) (true)
2502#define buf_block_hash_lock_held_s(b, p) (true)
2503#define buf_block_hash_lock_held_x(b, p) (true)
2504#define buf_block_hash_lock_held_s_or_x(b, p) (true)
2505#endif /* UNIV_DEBUG && !UNIV_HOTBACKUP */
2506
2507/** @} */
2508
2509/**********************************************************************
2510Let us list the consistency conditions for different control block states.
2511
2512NOT_USED: is in free list, not in LRU list, not in flush list, nor
2513 page hash table
2514READY_FOR_USE: is not in free list, LRU list, or flush list, nor page
2515 hash table
2516MEMORY: is not in free list, LRU list, or flush list, nor page
2517 hash table
2518FILE_PAGE: space and offset are defined, is in page hash table
2519 if io_fix == BUF_IO_WRITE,
2520 pool: no_flush[flush_type] is in reset state,
2521 pool: n_flush[flush_type] > 0
2522
2523 (1) if buf_fix_count == 0, then
2524 is in LRU list, not in free list
2525 is in flush list,
2526 if and only if oldest_modification > 0
2527 is x-locked,
2528 if and only if io_fix == BUF_IO_READ
2529 is s-locked,
2530 if and only if io_fix == BUF_IO_WRITE
2531
2532 (2) if buf_fix_count > 0, then
2533 is not in LRU list, not in free list
2534 is in flush list,
2535 if and only if oldest_modification > 0
2536 if io_fix == BUF_IO_READ,
2537 is x-locked
2538 if io_fix == BUF_IO_WRITE,
2539 is s-locked
2540
2541State transitions:
2542
2543NOT_USED => READY_FOR_USE
2544READY_FOR_USE => MEMORY
2545READY_FOR_USE => FILE_PAGE
2546MEMORY => NOT_USED
2547FILE_PAGE => NOT_USED NOTE: This transition is allowed if and only if
2548 (1) buf_fix_count == 0,
2549 (2) oldest_modification == 0, and
2550 (3) io_fix == 0.
2551*/
2552
2553#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
2554#ifndef UNIV_HOTBACKUP
2555/** Functor to validate the LRU list. */
  /* Debug check applied per element: assert its in_LRU_list flag is set. */
  void operator()(const buf_page_t *elem) const { ut_a(elem->in_LRU_list); }
2558
  /** Walk buf_pool->LRU and assert that every page on it has its
  in_LRU_list debug flag set.
  @param buf_pool buffer pool instance to validate */
  static void validate(const buf_pool_t *buf_pool) {
    CheckInLRUList check;
    ut_list_validate(buf_pool->LRU, check);
  }
2563};
2564
/** Functor to validate the free list. */
  /* Debug check applied per element: assert its in_free_list flag is set. */
  void operator()(const buf_page_t *elem) const { ut_a(elem->in_free_list); }
2568
  /** Walk buf_pool->free and assert that every page on it has its
  in_free_list debug flag set.
  @param buf_pool buffer pool instance to validate */
  static void validate(const buf_pool_t *buf_pool) {
    CheckInFreeList check;
    ut_list_validate(buf_pool->free, check);
  }
2573};
2574
  /* Debug check applied per element: a block on the unzip_LRU list must
  also be on the LRU list, so both flags are asserted. */
  void operator()(const buf_block_t *elem) const {
    ut_a(elem->page.in_LRU_list);
    ut_a(elem->in_unzip_LRU_list);
  }
2580
2581 static void validate(const buf_pool_t *buf_pool) {
2583 ut_list_validate(buf_pool->unzip_LRU, check);
2584 }
2585};
2586#endif /* !UNIV_HOTBACKUP */
2587#endif /* UNIV_DEBUG || defined UNIV_BUF_DEBUG */
2588
2589#ifndef UNIV_HOTBACKUP
/** Prepare a page before adding to the free list.
Resets the page id of the descriptor — presumably so a stale
(space, page_no) cannot be matched while the block sits on the free
list; confirm against reset_page_id().
@param[in,out] bpage Buffer page to prepare for freeing. */
inline void buf_page_prepare_for_free(buf_page_t *bpage) noexcept {
  bpage->reset_page_id();
}
2595#endif /* !UNIV_HOTBACKUP */
2596
2597/** Gets the compressed page descriptor corresponding to an uncompressed
2598page if applicable.
2599@param[in] block Get the zip descriptor for this block. */
2601 return block->get_page_zip();
2602}
2603
2604/** Gets the compressed page descriptor corresponding to an uncompressed
2605page if applicable. Const version.
2606@param[in] block Get the zip descriptor for this block.
2607@return page descriptor or nullptr. */
2609 const buf_block_t *block) noexcept {
2610 return block->get_page_zip();
2611}
2612#include "buf0buf.ic"
2613
2614#endif /* !buf0buf_h */
uint32_t space_id_t
Tablespace identifier.
Definition: api0api.h:50
uint32_t page_no_t
Page number.
Definition: api0api.h:48
Cache_hint
Definition: buf0buf.h:90
@ MAKE_YOUNG
Move the block to the start of the LRU list if there is a danger that the block would drift out of th...
@ KEEP_OLD
Preserve the current LRU position of the block.
static void buf_block_dbg_add_level(buf_block_t *block, latch_level_t level)
Adds latch level info for the rw-lock protecting the buffer frame.
bool buf_block_hash_lock_held_x(const buf_pool_t *buf_pool, const buf_block_t *block)
Definition: buf0buf.h:2489
static buf_block_t * buf_page_get_block(buf_page_t *bpage)
Gets the buf_block_t handle of a buffered file block if an uncompressed page frame exists,...
static bool buf_page_belongs_to_unzip_LRU(const buf_page_t *bpage)
Determines if a block should be on unzip_LRU list.
static bool buf_page_can_relocate(const buf_page_t *bpage)
Determine if a buffer block can be relocated in memory.
buf_page_t * buf_page_get_zip(const page_id_t &page_id, const page_size_t &page_size)
Get read access to a compressed page (usually of type FIL_PAGE_TYPE_ZBLOB or FIL_PAGE_TYPE_ZBLOB2).
Definition: buf0buf.cc:3221
dberr_t buf_pool_init(ulint total_size, ulint n_instances)
Creates the buffer pool.
Definition: buf0buf.cc:1437
void buf_page_print(const byte *read_buf, const page_size_t &page_size, ulint flags)
Prints a page to stderr.
Definition: buf0buf.cc:545
ulint buf_get_n_pending_read_ios(void)
Returns the number of pending buf pool read ios.
Definition: buf0buf.cc:6393
bool buf_page_hash_lock_held_s_or_x(const buf_pool_t *buf_pool, const buf_page_t *bpage)
Test if page_hash lock is held in x or s-mode.
Definition: buf0buf.h:2478
static ulint buf_pool_get_curr_size(void)
Gets the current size of buffer buf_pool in bytes.
void buf_pool_wait_for_no_pending_io_reads()
Waits until there are no pending I/O read operations for the buffer pool.
Definition: buf0buf.cc:6779
ulint buf_page_hash_lock_held_x(const buf_pool_t *buf_pool, const buf_page_t *bpage)
Test if page_hash lock is held in x-mode.
Definition: buf0buf.h:2471
bool buf_validate(void)
Validates the buffer pool data structure.
Definition: buf0buf.cc:6179
void assert_block_ahi_empty_on_init(buf_block_t *block)
Definition: buf0buf.h:1957
void buf_flush_update_zip_checksum(buf_frame_t *page, ulint size, lsn_t lsn, bool skip_lsn_check)
Calculate the checksum of a page from compressed table and update the page.
Definition: buf0flu.cc:955
static enum buf_page_state buf_block_get_state(const buf_block_t *block)
Gets the state of a block.
static void buf_page_set_state(buf_page_t *bpage, enum buf_page_state state)
Sets the state of a block.
buf_block_t * buf_block_from_ahi(const byte *ptr)
Get a buffer block from an adaptive hash index pointer.
Definition: buf0buf.cc:3433
lsn_t buf_pool_get_oldest_modification_lwm(void)
Gets a safe low watermark for oldest_modification.
Definition: buf0buf.cc:418
bool buf_frame_will_withdrawn(buf_pool_t *buf_pool, const byte *ptr)
Determines if a frame is intended to be withdrawn.
Definition: buf0buf.cc:1805
buf_page_t * buf_page_init_for_read(dberr_t *err, ulint mode, const page_id_t &page_id, const page_size_t &page_size, bool unzip)
Inits a page for read to the buffer buf_pool.
Definition: buf0buf.cc:4809
void buf_pool_update_madvise()
Checks if innobase_should_madvise_buf_pool() value has changed since we've last check and if so,...
Definition: buf0buf.cc:950
ulonglong buf_pool_adjust_chunk_unit(ulonglong size)
Adjust the proposed chunk unit size so that it satisfies all invariants.
Definition: buf0buf.cc:2088
static buf_page_t * buf_page_hash_get_locked(buf_pool_t *buf_pool, const page_id_t &page_id, rw_lock_t **lock, ulint lock_mode, bool watch=false)
Returns the control block of a file page, NULL if not found.
static enum buf_io_fix buf_page_get_io_fix(const buf_page_t *bpage)
Gets the io_fix state of a block.
static void buf_page_set_io_fix(buf_page_t *bpage, enum buf_io_fix io_fix)
Sets the io_fix state of a block.
buf_page_t * buf_page_set_file_page_was_freed(const page_id_t &page_id)
Sets file_page_was_freed true if the page is found in the buffer pool.
Definition: buf0buf.cc:3134
static void buf_page_set_accessed(buf_page_t *bpage)
Flag a block accessed.
static void buf_block_set_io_fix(buf_block_t *block, enum buf_io_fix io_fix)
Sets the io_fix state of a block.
void buf_page_make_young(buf_page_t *bpage)
Moves a page to the start of the buffer pool LRU list.
Definition: buf0buf.cc:3088
buf_pool_t * buf_pool_ptr
The buffer pools of the database.
Definition: buf0buf.cc:299
void buf_pool_clear_hash_index(void)
Clears the adaptive hash index on all pages in the buffer pool.
Definition: buf0buf.cc:2661
void buf_get_total_stat(buf_pool_stat_t *tot_stat)
Get total buffer pool statistics.
Definition: buf0buf.cc:490
static buf_pool_t * buf_pool_from_bpage(const buf_page_t *bpage)
Returns the buffer pool instance given a page instance.
static buf_flush_t buf_page_get_flush_type(const buf_page_t *bpage)
Get the flush type of a page.
static ulint buf_page_get_freed_page_clock(const buf_page_t *bpage)
Reads the freed_page_clock of a buffer block.
ulint buf_page_hash_lock_held_s(const buf_pool_t *buf_pool, const buf_page_t *bpage)
Test if page_hash lock is held in s-mode.
Definition: buf0buf.h:2465
static std::chrono::steady_clock::time_point buf_page_is_accessed(const buf_page_t *bpage)
Determine the time of first access of a block in the buffer pool.
constexpr ulint MAX_PAGE_HASH_LOCKS
The maximum number of page_hash locks.
Definition: buf0buf.h:111
bool buf_page_optimistic_get(ulint rw_latch, buf_block_t *block, uint64_t modify_clock, Page_fetch fetch_mode, const char *file, ulint line, mtr_t *mtr)
This is the general function used to get optimistic access to a database page.
Definition: buf0buf.cc:4431
void buf_pool_watch_unset(const page_id_t &page_id)
Stop watching if the page has been read in.
Definition: buf0buf.cc:3037
static byte * buf_frame_copy(byte *buf, const buf_frame_t *frame)
Copies contents of a buffer frame to a given buffer.
bool buf_pool_watch_occurred(const page_id_t &page_id)
Check if the page has been read in.
Definition: buf0buf.cc:3064
static bool buf_page_peek_if_young(const buf_page_t *bpage)
Tells, for heuristics, if a block is still close enough to the MRU end of the LRU list meaning that i...
bool buf_page_free_stale(buf_pool_t *buf_pool, buf_page_t *bpage) noexcept
Free a stale page.
Definition: buf0buf.cc:5332
buf_page_t * buf_page_hash_get_x_locked(buf_pool_t *b, const page_id_t &page_id, rw_lock_t **l)
Definition: buf0buf.h:1009
bool buf_page_io_complete(buf_page_t *bpage, bool evict)
Completes an asynchronous read or write request of a file page to or from the buffer pool.
Definition: buf0buf.cc:5620
static void buf_page_set_old(buf_page_t *bpage, bool old)
Flag a block old.
size_t buf_pool_pending_io_reads_count()
Computes number of pending I/O read operations for the buffer pool.
Definition: buf0buf.cc:6758
size_t buf_pool_pending_io_writes_count()
Computes number of pending I/O write operations for the buffer pool.
Definition: buf0buf.cc:6766
void buf_read_page_handle_error(buf_page_t *bpage)
Unfixes the page, unlatches the page, removes it from page_hash and removes it from LRU.
Definition: buf0buf.cc:5297
static ulint buf_block_unfix(buf_page_t *bpage)
Decrements the bufferfix count.
static void buf_page_set_flush_type(buf_page_t *bpage, buf_flush_t flush_type)
Set the flush type of a page.
void buf_page_free_stale_during_write(buf_page_t *bpage, bool owns_sx_lock=false) noexcept
Free a stale page that is being written.
Definition: buf0buf.cc:5415
static buf_pool_t * buf_pool_from_block(const buf_block_t *block)
Returns the buffer pool instance given a block instance.
bool buf_is_block_in_instance(const buf_pool_t *buf_pool, const buf_block_t *ptr)
Find out if a block pointer points into one of currently used chunks of the buffer pool.
Definition: buf0buf.cc:3469
rw_lock_t * buf_page_hash_lock_get(const buf_pool_t *buf_pool, const page_id_t page_id)
Get appropriate page_hash_lock.
Definition: buf0buf.h:2444
std::ostream & operator<<(std::ostream &out, const buf_pool_t &buf_pool)
Print the given buf_pool_t object.
Definition: buf0buf.cc:6839
static BPageMutex * buf_page_get_mutex(const buf_page_t *bpage)
Gets the mutex of a block.
static uint64_t buf_pool_hash_zip_frame(void *ptr)
Compute the hash value for blocks in buf_pool->zip_hash.
Definition: buf0buf.h:1882
static buf_frame_t * buf_block_get_frame(const buf_block_t *block)
Gets a pointer to the memory frame of a block.
void buf_pool_free_all()
Frees the buffer pool at shutdown.
Definition: buf0buf.cc:6873
static enum buf_page_state buf_page_get_state(const buf_page_t *bpage)
Gets the state of a block.
static ulint buf_pool_index(const buf_pool_t *buf_pool)
Calculates the index of a buffer pool to the buf_pool[] array.
buf_block_t * buf_page_get_gen(const page_id_t &page_id, const page_size_t &page_size, ulint rw_latch, buf_block_t *guess, Page_fetch mode, ut::Location location, mtr_t *mtr, bool dirty_with_no_latch=false)
This is the general function used to get access to a database page.
Definition: buf0buf.cc:4365
ulint buf_get_free_list_len(void)
Gets the current length of the free list of buffer blocks.
rw_lock_t * buf_page_hash_lock_x_confirm(rw_lock_t *hash_lock, buf_pool_t *buf_pool, const page_id_t &page_id)
Definition: buf0buf.h:2456
static lsn_t buf_page_get_newest_modification(const buf_page_t *bpage)
Gets the youngest modification log sequence number for a frame.
ulint buf_get_latched_pages_number(void)
Returns the number of latched pages in the buffer pool.
Definition: buf0buf.cc:6374
static buf_page_t * buf_page_hash_get_low(buf_pool_t *buf_pool, const page_id_t &page_id)
Returns the control block of a file page, NULL if not found.
void assert_block_ahi_empty(buf_block_t *block)
Definition: buf0buf.h:1953
static ulint buf_block_fix(buf_page_t *bpage)
Increments the bufferfix count.
constexpr ulint MAX_BUFFER_POOLS
The maximum number of buffer pools that can be defined.
Definition: buf0buf.h:105
static buf_block_t * buf_block_hash_get_locked(buf_pool_t *buf_pool, const page_id_t &page_id, rw_lock_t **lock, ulint lock_mode)
Returns the control block of a file page, NULL if not found.
void buf_print_io(FILE *file)
Prints info of the buffer i/o.
Definition: buf0buf.cc:6675
static buf_pool_t * buf_pool_from_array(ulint index)
Returns the buffer pool instance given its array index.
static void buf_page_release_latch(buf_block_t *block, ulint rw_latch)
Releases a latch, if specified.
void buf_stats_get_pool_info(buf_pool_t *buf_pool, ulint pool_id, buf_pool_info_t *all_pool_info)
Collect buffer pool stats information for a buffer pool.
Definition: buf0buf.cc:6476
void buf_resize_thread()
This is the thread for resizing buffer pool.
Definition: buf0buf.cc:2634
void buf_print(void)
Prints info of the buffer pool data structure.
Definition: buf0buf.cc:6280
const buf_block_t * buf_page_try_get(const page_id_t &page_id, ut::Location location, mtr_t *mtr)
Given a tablespace id and page number tries to get that page.
Definition: buf0buf.cc:4624
static ulint buf_block_get_freed_page_clock(const buf_block_t *block)
Reads the freed_page_clock of a buffer block.
static void buf_ptr_get_fsp_addr(const void *ptr, space_id_t *space, fil_addr_t *addr)
Gets the space id, page offset, and byte offset within page of a pointer pointing to a buffer frame c...
static bool buf_page_peek_if_too_old(const buf_page_t *bpage)
Recommends a move of a block to the start of the LRU list if there is danger of dropping from the buf...
bool buf_block_will_withdrawn(buf_pool_t *buf_pool, const buf_block_t *block)
Determines if a block is intended to be withdrawn.
Definition: buf0buf.cc:1784
void buf_page_make_old(buf_page_t *bpage)
Moved a page to the end of the buffer pool LRU list so that it can be flushed out at the earliest.
Definition: buf0buf.cc:3100
lsn_t buf_pool_get_oldest_modification_approx(void)
Gets the smallest oldest_modification lsn among all of the earliest added pages in flush lists.
Definition: buf0buf.cc:365
buf_block_t * buf_block_hash_get_s_locked(buf_pool_t *b, const page_id_t &page_id, rw_lock_t **l)
Definition: buf0buf.h:1022
void buf_must_be_all_freed(void)
Assert that all file pages in the buffer are in a replaceable state.
Definition: buf0buf.cc:6748
Page_fetch
Definition: buf0buf.h:57
@ NO_LATCH
get and bufferfix, but set no latch; we have separated this case, because it is error-prone programmi...
@ NORMAL
Get always.
@ IF_IN_POOL
get if in pool
@ IF_IN_POOL_OR_WATCH
Get the page only if it's in the buffer pool, if not then set a watch on the page.
@ PEEK_IF_IN_POOL
get if in pool, do not make the block young in the LRU list
@ POSSIBLY_FREED
Like Page_fetch::NORMAL, but do not mind if the file page has been freed.
@ SCAN
Same as NORMAL, but hint that the fetch is part of a large scan.
rw_lock_t * buf_page_hash_lock_s_confirm(rw_lock_t *hash_lock, const buf_pool_t *buf_pool, const page_id_t page_id)
If not appropriate page_hash_lock, relock until appropriate.
Definition: buf0buf.h:2450
static void buf_page_unset_sticky(buf_page_t *bpage)
Removes stickiness of a block.
buf_page_print_flags
Definition: buf0buf.h:631
@ BUF_PAGE_PRINT_NO_FULL
Do not print the full page dump.
Definition: buf0buf.h:635
@ BUF_PAGE_PRINT_NO_CRASH
Do not crash at the end of buf_page_print().
Definition: buf0buf.h:633
buf_page_t * buf_page_get_also_watch(buf_pool_t *b, const page_id_t &page_id)
Definition: buf0buf.h:1017
bool buf_pool_watch_is_sentinel(const buf_pool_t *buf_pool, const buf_page_t *bpage)
Determine if a block is a sentinel for a buffer pool watch.
Definition: buf0buf.cc:2882
buf_block_t * buf_page_get_with_no_latch(const page_id_t &id, const page_size_t &size, ut::Location location, mtr_t *mtr)
Use these macros to bufferfix a page with no latching.
Definition: buf0buf.h:436
constexpr ulint MAX_BUFFER_POOLS_BITS
Number of bits to representing a buffer pool ID.
Definition: buf0buf.h:102
bool buf_block_hash_lock_held_s(const buf_pool_t *buf_pool, const buf_block_t *block)
Definition: buf0buf.h:2484
buf_block_t * buf_block_hash_get(buf_pool_t *b, const page_id_t &page_id)
Definition: buf0buf.h:1032
static bool buf_page_peek(const page_id_t &page_id)
Returns true if the page can be found in the buffer pool hash table.
bool buf_page_get_known_nowait(ulint rw_latch, buf_block_t *block, Cache_hint hint, const char *file, ulint line, mtr_t *mtr)
This is used to get access to a known database page, when no waiting can be done.
Definition: buf0buf.cc:4531
buf_page_t * buf_page_hash_get_s_locked(buf_pool_t *b, const page_id_t &page_id, rw_lock_t **l)
Definition: buf0buf.h:1004
static void buf_page_release_zip(buf_page_t *bpage)
Releases a compressed-only page acquired with buf_page_get_zip().
static void buf_block_free(buf_block_t *block)
Frees a buffer block which does not contain a file page.
static uint64_t buf_pool_hash_zip(buf_block_t *b)
Definition: buf0buf.h:1886
buf_page_t * buf_page_hash_get(buf_pool_t *b, const page_id_t &page_id)
Definition: buf0buf.h:1014
static void buf_block_set_state(buf_block_t *block, enum buf_page_state state)
Sets the state of a block.
constexpr uint32_t BUF_PAGE_STATE_BITS
The common buffer control block structure for compressed and uncompressed frames.
Definition: buf0buf.h:1116
static void buf_block_buf_fix_inc_func(ut::Location location, buf_block_t *block)
Increments the bufferfix count.
static buf_pool_t * buf_pool_get(const page_id_t &page_id)
Returns the buffer pool instance given a page id.
static ulint buf_get_withdraw_depth(buf_pool_t *buf_pool)
Return how many more pages must be added to the withdraw list to reach the withdraw target of the cur...
buf_page_state
States of a control block.
Definition: buf0buf.h:127
@ BUF_BLOCK_NOT_USED
Is in the free list; must be after the BUF_BLOCK_ZIP_ constants for compressed-only pages.
Definition: buf0buf.h:137
@ BUF_BLOCK_ZIP_PAGE
Contains a clean compressed page.
Definition: buf0buf.h:131
@ BUF_BLOCK_REMOVE_HASH
Hash index should be removed before putting to the free list.
Definition: buf0buf.h:149
@ BUF_BLOCK_MEMORY
Contains some main memory object.
Definition: buf0buf.h:146
@ BUF_BLOCK_ZIP_DIRTY
Contains a compressed page that is in the buf_pool->flush_list.
Definition: buf0buf.h:133
@ BUF_BLOCK_POOL_WATCH
A sentinel for the buffer pool watch, element of buf_pool->watch[].
Definition: buf0buf.h:129
@ BUF_BLOCK_READY_FOR_USE
When buf_LRU_get_free_block returns a block, it is in this state.
Definition: buf0buf.h:140
@ BUF_BLOCK_FILE_PAGE
Contains a buffered file page.
Definition: buf0buf.h:143
void buf_refresh_io_stats_all()
Refresh the statistics used to print per-second averages.
Definition: buf0buf.cc:6737
static ulint buf_pool_get_n_pages(void)
Gets the current size of buffer buf_pool in frames.
buf_block_t * buf_block_alloc(buf_pool_t *buf_pool)
Allocates a buffer block.
Definition: buf0buf.cc:516
static buf_block_t * buf_get_nth_chunk_block(const buf_pool_t *buf_pool, ulint n, ulint *chunk_size)
Get the nth chunk's buffer block in the specified buffer pool.
static ulint buf_pool_size_align(ulint size)
Calculate aligned buffer pool size based on srv_buf_pool_chunk_unit, if needed.
buf_block_t * buf_page_get(const page_id_t &id, const page_size_t &size, ulint latch, ut::Location location, mtr_t *mtr)
NOTE! The following macros should be used instead of buf_page_get_gen, to improve debugging.
Definition: buf0buf.h:425
static void buf_block_modify_clock_inc(buf_block_t *block)
Increment the modify clock.
static bool buf_page_in_file(const buf_page_t *bpage)
Determines if a block is mapped to a tablespace.
page_zip_des_t * buf_block_get_page_zip(buf_block_t *block) noexcept
Gets the compressed page descriptor corresponding to an uncompressed page if applicable.
Definition: buf0buf.h:2600
void buf_pool_invalidate(void)
Invalidates the file pages in the buffer pool when an archive recovery is completed.
Definition: buf0buf.cc:5983
void buf_page_free_descriptor(buf_page_t *bpage)
Free a buf_page_t descriptor.
Definition: buf0buf.cc:1353
buf_block_t * buf_page_create(const page_id_t &page_id, const page_size_t &page_size, rw_lock_type_t rw_latch, mtr_t *mtr)
Initializes a page to the buffer buf_pool.
Definition: buf0buf.cc:5017
double buf_get_modified_ratio_pct(void)
Return the ratio in percents of modified pages in the buffer pool / database pages in the buffer pool...
Definition: buf0buf.cc:6407
static buf_frame_t * buf_frame_align(byte *ptr)
void buf_page_prepare_for_free(buf_page_t *bpage) noexcept
Prepare a page before adding to the free list.
Definition: buf0buf.h:2592
buf_block_t * buf_pool_contains_zip(buf_pool_t *buf_pool, const void *data)
Finds a block in the buffer pool that points to a given compressed page.
Definition: buf0buf.cc:1107
static void buf_block_set_file_page(buf_block_t *block, const page_id_t &page_id)
Map a block to a file page.
static buf_page_t * buf_page_alloc_descriptor(void)
Allocates a buf_page_t descriptor.
static enum buf_io_fix buf_block_get_io_fix(const buf_block_t *block)
Gets the io_fix state of a block.
bool buf_block_hash_lock_held_s_or_x(const buf_pool_t *buf_pool, const buf_block_t *block)
Definition: buf0buf.h:2494
bool buf_block_state_valid(buf_block_t *block)
Check if a buf_block_t object is in a valid state.
Definition: buf0buf.h:1875
buf_block_t * buf_block_hash_get_x_locked(buf_pool_t *b, const page_id_t &page_id, rw_lock_t **l)
Definition: buf0buf.h:1027
buf_page_t * buf_page_reset_file_page_was_freed(const page_id_t &page_id)
Sets file_page_was_freed false if the page is found in the buffer pool.
Definition: buf0buf.cc:3160
static bool buf_page_is_old(const buf_page_t *bpage)
Determine if a block has been flagged old.
void buf_block_buf_fix_inc(buf_block_t *b, ut::Location l)
Increments the bufferfix count.
Definition: buf0buf.h:587
static void buf_page_set_sticky(buf_page_t *bpage)
Makes a block sticky.
void buf_get_total_list_size_in_bytes(buf_pools_list_size_t *buf_pools_list_size)
Get total list size in bytes from all buffer pools.
Definition: buf0buf.cc:469
void buf_get_total_list_len(ulint *LRU_len, ulint *free_len, ulint *flush_list_len)
Get total buffer pool statistics.
Definition: buf0buf.cc:449
bool buf_zip_decompress(buf_block_t *block, bool check)
Decompress a block.
Definition: buf0buf.cc:3365
The database buffer buf_pool.
The database buffer pool global types for the directory.
byte buf_frame_t
A buffer frame.
Definition: buf0types.h:61
BPageMutex BufPoolZipMutex
Definition: buf0types.h:200
constexpr uint32_t BUF_BUDDY_LOW
Smallest buddy page size.
Definition: buf0types.h:182
constexpr uint32_t BUF_BUDDY_SIZES_MAX
Maximum number of buddy sizes based on the max page size.
Definition: buf0types.h:188
buf_flush_t
Flags for flush types.
Definition: buf0types.h:67
@ BUF_FLUSH_N_TYPES
Index of last element + 1
Definition: buf0types.h:78
ib_mutex_t BufListMutex
Definition: buf0types.h:198
ib_bpmutex_t BPageMutex
Definition: buf0types.h:197
buf_io_fix
Flags for io_fix types.
Definition: buf0types.h:98
@ BUF_IO_NONE
no pending I/O
Definition: buf0types.h:100
@ BUF_IO_WRITE
write pending
Definition: buf0types.h:106
@ BUF_IO_READ
read pending
Definition: buf0types.h:103
@ BUF_IO_PIN
disallow relocation of block and its removal from the flush_list
Definition: buf0types.h:109
The database buffer pool high-level routines.
Class implementing buf_pool->flush_list hazard pointer.
Definition: buf0buf.h:1964
~FlushHp() override=default
Destructor.
FlushHp(const buf_pool_t *buf_pool, const ib_mutex_t *mutex)
Constructor.
Definition: buf0buf.h:1969
void adjust(const buf_page_t *bpage) override
Adjust the value of hp.
Definition: buf0buf.cc:2842
We use Flush_observer to track flushing of non-redo logged pages in bulk create index(btr0load....
Definition: buf0flu.h:268
A "Hazard Pointer" class used to iterate over page lists inside the buffer pool.
Definition: buf0buf.h:1895
bool is_hp(const buf_page_t *bpage)
Checks if a bpage is the hp.
Definition: buf0buf.cc:2817
void set(buf_page_t *bpage)
Set current value.
Definition: buf0buf.cc:2804
virtual void adjust(const buf_page_t *bpage)=0
Adjust the value of hp.
const ib_mutex_t * m_mutex
mutex that protects access to the m_hp.
Definition: buf0buf.h:1945
void move(const buf_page_t *bpage, buf_page_t *dpage)
Adjust the value of hp for moving.
Definition: buf0buf.cc:2829
buf_page_t * m_hp
hazard pointer.
Definition: buf0buf.h:1949
virtual ~HazardPointer()=default
Destructor.
const buf_pool_t * m_buf_pool
Buffer pool instance.
Definition: buf0buf.h:1941
HazardPointer(const HazardPointer &)
Disable copying.
buf_page_t * get() const
Get current value.
Definition: buf0buf.h:1907
HazardPointer(const buf_pool_t *buf_pool, const ib_mutex_t *mutex)
Constructor.
Definition: buf0buf.h:1900
HazardPointer & operator=(const HazardPointer &)
Class implementing buf_pool->LRU hazard pointer.
Definition: buf0buf.h:1983
~LRUHp() override=default
Destructor.
void adjust(const buf_page_t *bpage) override
Adjust the value of hp.
Definition: buf0buf.cc:2857
LRUHp(const buf_pool_t *buf_pool, const ib_mutex_t *mutex)
Constructor.
Definition: buf0buf.h:1988
Special purpose iterators to be used when scanning the LRU list.
Definition: buf0buf.h:2005
~LRUItr() override=default
Destructor.
buf_page_t * start()
Selects from where to start a scan.
Definition: buf0buf.cc:2872
LRUItr(const buf_pool_t *buf_pool, const ib_mutex_t *mutex)
Constructor.
Definition: buf0buf.h:2010
The purpose of this class is to hide the knowledge that Buf_io_fix_latching_rules even exists from us...
Definition: buf0buf.cc:5526
Definition: buf0buf.h:1367
bool someone_is_responsible() const
Checks if there is any thread responsible for I/O on this page now.
Definition: buf0buf.h:1375
void release()
Called by the thread responsible for I/O on this page to release its responsibility.
Definition: buf0buf.h:1387
bool current_thread_is_responsible() const
Checks if the current thread is responsible for I/O on this page now.
Definition: buf0buf.h:1381
void take()
Called by the thread which becomes responsible for I/O on this page to indicate that it takes the res...
Definition: buf0buf.h:1394
std::thread::id responsible_thread
The thread responsible for I/O on this page, or an impossible value if no thread is currently respons...
Definition: buf0buf.h:1370
Definition: buf0buf.h:1126
uint32_t freed_page_clock
The value of buf_pool->freed_page_clock when this block was the last time put to the head of the LRU ...
Definition: buf0buf.h:1611
buf_page_t(const buf_page_t &other)
Copy constructor.
Definition: buf0buf.h:1130
void space_id_changed()
Updates new space reference and acquires "reference count latch" and the current version of the space...
Definition: buf0buf.h:1259
void take_io_responsibility()
Called by the thread which becomes responsible for I/O on this page to indicate that it takes the res...
Definition: buf0buf.h:1421
bool is_stale() const
Checks if this space reference saved during last page ID initialization was deleted or truncated sinc...
Definition: buf0buf.h:1192
void init_io_fix()
This is called only when having full ownership of the page object and no other thread can reach it.
Definition: buf0buf.h:1451
void set_io_fix(buf_io_fix io_fix)
Sets io_fix to specified value.
Definition: buf0buf.cc:5600
bool someone_has_io_responsibility() const
Checks if there is any thread responsible for I/O on this page now.
Definition: buf0buf.h:1405
bool has_correct_io_fix_value() const
Checks if io_fix has any of the known enum values.
Definition: buf0buf.h:1354
lsn_t get_newest_lsn() const noexcept
Definition: buf0buf.h:1291
bool current_thread_has_io_responsibility() const
Checks if the current thread is responsible for I/O on this page now.
Definition: buf0buf.h:1411
void set_newest_lsn(lsn_t lsn) noexcept
Set the latest modification LSN.
Definition: buf0buf.h:1302
buf_page_t * hash
Node used in chaining to buf_pool->page_hash or buf_pool->zip_hash.
Definition: buf0buf.h:1541
bool is_io_fix_write() const
Checks if the current value of io_fix is BUF_IO_WRITE.
Definition: buf0buf.cc:5580
Flush_observer * get_flush_observer() noexcept
Definition: buf0buf.h:1275
fil_space_t * get_space() const
Retrieve the tablespace object if one was available during page ID initialization.
Definition: buf0buf.h:1231
bool is_io_fix_read_as_opposed_to_write() const
Assuming that io_fix is either BUF_IO_READ or BUF_IO_WRITE determines which of the two it is.
Definition: buf0buf.cc:5592
space_id_t space() const noexcept
Retrieve the tablespace id.
Definition: buf0buf.h:1181
bool in_zip_hash
true if in buf_pool->zip_hash
Definition: buf0buf.h:1656
bool in_free_list
true if in buf_pool->free; when buf_pool->free_list_mutex is free, the following should hold: in_free...
Definition: buf0buf.h:1647
std::chrono::steady_clock::time_point access_time
Time of first access, or 0 if the block was never accessed in the buffer pool.
Definition: buf0buf.h:1620
Flush_observer * m_flush_observer
Flush observer instance.
Definition: buf0buf.h:1603
buf_fix_count_atomic_t buf_fix_count
Count of how many fold this block is currently bufferfixed.
Definition: buf0buf.h:1325
bool in_flush_list
true if in buf_pool->flush_list; when buf_pool->flush_list_mutex is free, the following should hold: ...
Definition: buf0buf.h:1643
void reset_page_id()
Sets stored value to invalid/empty value.
Definition: buf0buf.h:1252
bool is_dirty() const noexcept
Definition: buf0buf.h:1298
bool old
true if the block is in the old blocks in buf_pool->LRU_old
Definition: buf0buf.h:1629
bool was_io_fix_read() const
Checks if io_fix is BUF_IO_READ without requiring or acquiring any latches.
Definition: buf0buf.h:1511
void reinit_io_fix()
This is called only when having full ownership of the page object and no other thread can reach it.
Definition: buf0buf.h:1463
void reset_page_id(page_id_t new_page_id)
Sets stored page ID to the new value.
Definition: buf0buf.h:1236
void set_oldest_lsn(lsn_t lsn) noexcept
Set the LSN when the page is modified for the first time.
Definition: buf0buf.ic:744
uint16_t get_dblwr_batch_id() const
Definition: buf0buf.h:1177
bool was_stale() const
Checks if this space reference saved during last page ID initialization was deleted or truncated sinc...
Definition: buf0buf.h:1209
lsn_t newest_modification
The flush LSN, LSN when this page was written to the redo log.
Definition: buf0buf.h:1574
void release_io_responsibility()
Called by the thread responsible for I/O on this page to release its responsibility.
Definition: buf0buf.h:1417
lsn_t get_oldest_lsn() const noexcept
Definition: buf0buf.h:1295
lsn_t oldest_modification
log sequence number of the youngest modification to this block, zero if not modified.
Definition: buf0buf.h:1578
page_id_t id
Page id.
Definition: buf0buf.h:1319
copyable_atomic_t< buf_io_fix > io_fix
Type of pending I/O operation.
Definition: buf0buf.h:1331
UT_LIST_NODE_T(buf_page_t) list
Based on state, this is a list node, protected by the corresponding list mutex, in one of the followi...
buf_io_fix get_io_fix_snapshot() const
Retrieves a value of io_fix without requiring or acquiring any latches.
Definition: buf0buf.h:1437
page_size_t size
Page size.
Definition: buf0buf.h:1322
io_responsibility_t io_responsibility
Tracks which thread is responsible for I/O on this page.
Definition: buf0buf.h:1400
uint8_t buf_pool_index
Index number of the buffer pool that this block belongs to.
Definition: buf0buf.h:1538
uint16_t m_dblwr_id
Double write instance ordinal value during writes.
Definition: buf0buf.h:1625
bool was_io_fix_none() const
Checks if io_fix is BUF_IO_NONE without requiring or acquiring any latches.
Definition: buf0buf.h:1528
void set_clean() noexcept
Set page to clean state.
Definition: buf0buf.h:1309
buf_io_fix get_io_fix() const
Retrieves the current value of io_fix.
Definition: buf0buf.h:1479
bool is_io_fix_read() const
Checks if the current value of io_fix is BUF_IO_READ.
Definition: buf0buf.cc:5586
uint32_t m_version
Version of fil_space_t when the page was updated.
Definition: buf0buf.h:1616
bool in_LRU_list
true if the page is in the LRU list; used in debugging
Definition: buf0buf.h:1650
void set_flush_observer(Flush_observer *flush_observer) noexcept
Set the flush observer for the page.
Definition: buf0buf.h:1279
buf_page_state state
Block state.
Definition: buf0buf.h:1531
page_no_t page_no() const noexcept
Retrieve the page number.
Definition: buf0buf.h:1185
fil_space_t * m_space
Tablespace instance that this page belongs to.
Definition: buf0buf.h:1606
bool was_io_fixed() const
Checks if io_fix is BUF_IO_FIX or BUF_IO_READ or BUF_IO_WRITE without requiring or acquiring any latc...
Definition: buf0buf.h:1518
void reset_flush_observer() noexcept
Remove the flush observer.
Definition: buf0buf.h:1287
void set_dblwr_batch_id(uint16_t batch_id)
Set the doublewrite buffer ID.
Definition: buf0buf.h:1174
bool file_page_was_freed
This is set to true when fsp frees a page in buffer pool; protected by buf_pool->zip_mutex or buf_blo...
Definition: buf0buf.h:1634
UT_LIST_NODE_T(buf_page_t) LRU
node of the LRU list
friend class Latching_rules_helpers
Definition: buf0buf.h:1359
buf_flush_t flush_type
If this block is currently being flushed to disk, this tells the flush_type.
Definition: buf0buf.h:1535
bool in_page_hash
true if in buf_pool->page_hash
Definition: buf0buf.h:1653
page_zip_des_t zip
compressed page; zip.data (but not the data it points to) is protected by buf_pool->zip_mutex; state ...
Definition: buf0buf.h:1599
static bool is_correct_io_fix_value(buf_io_fix io_fix)
Checks if io_fix has any of the known enum values.
Definition: buf0buf.h:1339
Definition: buf0buf.h:1119
copyable_atomic_t(const copyable_atomic_t< T > &other)
Definition: buf0buf.h:1121
Definition: hash0hash.h:373
Page identifier.
Definition: buf0types.h:206
uint64_t hash() const
Retrieve the hash value.
Definition: buf0types.h:246
page_no_t page_no() const
Retrieve the page number.
Definition: buf0types.h:242
Page size descriptor.
Definition: page0size.h:49
int page
Definition: ctype-mb.cc:1235
dberr_t
Definition: db0err.h:38
fil_space_t * fil_space_get(space_id_t space_id)
Look up a tablespace.
Definition: fil0fil.cc:2229
The low-level file system.
uint16_t page_type_t
Definition: fil0fil.h:1184
constexpr uint32_t FIL_PAGE_TYPE
file page type: FIL_PAGE_INDEX,..., 2 bytes.
Definition: fil0types.h:75
constexpr uint32_t FIL_PAGE_DATA
start of the data on the page
Definition: fil0types.h:110
constexpr uint32_t FIL_PAGE_NEXT
if there is a 'natural' successor of the page, its offset.
Definition: fil0types.h:60
constexpr uint32_t FIL_PAGE_PREV
if there is a 'natural' predecessor of the page, its offset.
Definition: fil0types.h:50
flush_type
Definition: my_sys.h:291
The simple hash table utility.
static rw_lock_t * hash_lock_s_confirm(rw_lock_t *hash_lock, hash_table_t *table, uint64_t hash_value)
If not appropriate rw_lock for a hash value in a hash table, relock S-lock the another rw_lock until ...
static rw_lock_t * hash_lock_x_confirm(rw_lock_t *hash_lock, hash_table_t *table, uint64_t hash_value)
If not appropriate rw_lock for a hash value in a hash table, relock X-lock the another rw_lock until ...
static rw_lock_t * hash_get_lock(hash_table_t *table, uint64_t hash_value)
Gets the rw_lock for a hash value in a hash table.
static int flags[50]
Definition: hp_test1.cc:39
#define malloc(A)
Definition: lexyy.cc:914
#define free(A)
Definition: lexyy.cc:915
lock_mode
Definition: lock0types.h:51
uint64_t lsn_t
Type used for all log sequence number storage and arithmetic.
Definition: log0types.h:62
static uint16_t mach_read_from_2(const byte *b)
The following function is used to fetch data from 2 consecutive bytes.
static uint32_t mach_read_from_4(const byte *b)
The following function is used to fetch data from 4 consecutive bytes.
Mini-transaction buffer global types.
unsigned long long int ulonglong
Definition: my_inttypes.h:55
void copy(Shards< COUNT > &dst, const Shards< COUNT > &src) noexcept
Copy the counters, overwrite destination.
Definition: ut0counter.h:353
void clear(Shards< COUNT > &shards) noexcept
Clear the counter - reset to 0.
Definition: ut0counter.h:343
Definition: buf0block_hint.cc:29
const std::string FILE("FILE")
bool load(THD *, const dd::String_type &fname, dd::String_type *buf)
Read an sdi file from disk and store in a buffer.
Definition: sdi_file.cc:307
Definition: os0file.h:85
static Value err()
Create a Value object that represents an error condition.
Definition: json_binary.cc:909
Provides atomic access in shared-exclusive modes.
Definition: shared_spin_lock.h:78
Definition: varlen_sort.h:183
mode
Definition: file_handle.h:59
pid_type get_id()
Definition: process.h:47
static uint64_t hash_uint64(uint64_t value)
Hashes a 64-bit integer.
Definition: ut0rnd.h:185
std::list< T, ut::allocator< T > > list
Specialization of list which uses ut_allocator.
Definition: ut0new.h:2859
The interface to the operating system process control primitives.
Index page routines.
Shutdowns the Innobase database server.
std::atomic< enum srv_shutdown_t > srv_shutdown_state
At a shutdown this value climbs from SRV_SHUTDOWN_NONE to SRV_SHUTDOWN_EXIT_THREADS.
Definition: srv0start.cc:166
@ SRV_SHUTDOWN_EXIT_THREADS
Exit all threads and free resources.
Definition: srv0shutdown.h:110
The server main program.
Functor to validate the LRU list.
Definition: buf0buf.h:2566
static void validate(const buf_pool_t *buf_pool)
Definition: buf0buf.h:2569
void operator()(const buf_page_t *elem) const
Definition: buf0buf.h:2567
Functor to validate the LRU list.
Definition: buf0buf.h:2556
void operator()(const buf_page_t *elem) const
Definition: buf0buf.h:2557
static void validate(const buf_pool_t *buf_pool)
Definition: buf0buf.h:2559
Definition: buf0buf.h:2575
static void validate(const buf_pool_t *buf_pool)
Definition: buf0buf.h:2581
void operator()(const buf_block_t *elem) const
Definition: buf0buf.h:2576
The buffer control block structure.
Definition: buf0buf.h:1664
rw_lock_t debug_latch
In the debug version, each thread which bufferfixes the block acquires an s-latch here; so we can use...
Definition: buf0buf.h:1790
uint64_t modify_clock
This clock is incremented every time a pointer to a record on the page may become obsolete; this is u...
Definition: buf0buf.h:1804
BPageLock lock
read-write lock of the buffer frame
Definition: buf0buf.h:1674
std::atomic< uint32_t > n_hash_helps
Counter which controls building of a new hash index for the page.
Definition: buf0buf.h:1701
uint16_t curr_n_fields
prefix length for hash indexing: number of full fields
Definition: buf0buf.h:1764
page_type_t get_page_type() const
Get the page type of the current buffer block.
Definition: buf0buf.h:1850
buf_page_t page
page information; this must be the first field, so that buf_pool->page_hash can point to buf_page_t o...
Definition: buf0buf.h:1670
page_no_t get_next_page_no() const
Get the next page number of the current buffer block.
Definition: buf0buf.h:1838
page_zip_des_t const * get_page_zip() const noexcept
Const version.
Definition: buf0buf.h:1867
const char * get_page_type_str() const noexcept
Get the page type of the current buffer block as string.
Definition: buf0buf.cc:6866
bool in_withdraw_list
Definition: buf0buf.h:1691
UT_LIST_NODE_T(buf_block_t) unzip_LRU
node of the decompressed LRU list; a block is in the unzip_LRU list if page.state == BUF_BLOCK_FILE_P...
page_no_t get_prev_page_no() const
Get the prev page number of the current buffer block.
Definition: buf0buf.h:1844
std::atomic< uint32_t > n_bytes
Recommended prefix length for hash search: number of bytes in an incomplete last field.
Definition: buf0buf.h:1705
std::atomic< ulint > n_pointers
used in debugging: the number of pointers in the adaptive hash index pointing to this frame; protecte...
Definition: buf0buf.h:1753
uint64_t get_modify_clock(bool single_threaded) const noexcept
Get the modified clock (version) value.
Definition: buf0buf.h:1817
bool in_unzip_LRU_list
true if the page is in the decompressed LRU list; used in debugging
Definition: buf0buf.h:1689
const page_id_t & get_page_id() const
Get the page number and space id of the current buffer block.
Definition: buf0buf.h:1830
BPageMutex mutex
mutex protecting this block: state (also protected by the buffer pool mutex), io_fix,...
Definition: buf0buf.h:1811
byte * frame
pointer to buffer frame which is of size UNIV_PAGE_SIZE, and aligned to an address divisible by UNIV_...
Definition: buf0buf.h:1680
uint16_t curr_n_bytes
number of bytes in hash indexing
Definition: buf0buf.h:1767
page_no_t get_page_no() const
Get the page number of the current buffer block.
Definition: buf0buf.h:1834
std::atomic< bool > left_side
true or false, depending on whether the leftmost record of several records with the same prefix shoul...
Definition: buf0buf.h:1712
dict_index_t * index
Index for which the adaptive hash index has been created, or NULL if the page does not exist in the i...
Definition: buf0buf.h:1781
bool made_dirty_with_no_latch
true if block has been made dirty without acquiring X/SX latch as the block belongs to temporary tabl...
Definition: buf0buf.h:1775
page_zip_des_t * get_page_zip() noexcept
Gets the compressed page descriptor corresponding to an uncompressed page if applicable.
Definition: buf0buf.h:1861
std::atomic< uint32_t > n_fields
Recommended prefix length for hash search: number of full fields.
Definition: buf0buf.h:1708
bool curr_left_side
true or false in hash indexing
Definition: buf0buf.h:1770
Struct that is embedded in the free zip blocks.
Definition: buf0buf.h:2024
UT_LIST_NODE_T(buf_buddy_free_t) list
Node of zip_free list.
ulint size
size of the block
Definition: buf0buf.h:2026
byte bytes[FIL_PAGE_DATA]
stamp[FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID] == BUF_BUDDY_FREE_STAMP denotes a free block.
Definition: buf0buf.h:2027
union buf_buddy_free_t::@184 stamp
buf_page_t bpage
Embedded bpage descriptor.
Definition: buf0buf.h:2038
Definition: buf0buf.h:2136
uint64_t relocated
Definition: buf0buf.h:2138
ulint used
Definition: buf0buf.h:2137
std::chrono::steady_clock::duration relocated_duration
Definition: buf0buf.h:2139
Statistics of buddy blocks of a given size.
Definition: buf0buf.h:2126
uint64_t relocated
Number of blocks relocated by the buddy system.
Definition: buf0buf.h:2131
snapshot_t take_snapshot()
Definition: buf0buf.h:2142
std::atomic< ulint > used
Number of blocks allocated from the buddy system.
Definition: buf0buf.h:2128
std::chrono::steady_clock::duration relocated_duration
Total duration of block relocations.
Definition: buf0buf.h:2134
A chunk of buffers.
Definition: buf0buf.ic:52
This structure defines information we will fetch from each buffer pool.
Definition: buf0buf.h:154
ulint young_making_delta
num of pages made young since last printout
Definition: buf0buf.h:199
double pages_created_rate
num of pages create per second
Definition: buf0buf.h:195
ulint n_pages_read
buf_pool->n_pages_read
Definition: buf0buf.h:175
ulint pool_unique_id
Buffer Pool ID.
Definition: buf0buf.h:156
ulint n_pend_reads
buf_pool->n_pend_reads, pages pending read
Definition: buf0buf.h:164
ulint n_ra_pages_read_rnd
buf_pool->n_ra_pages_read_rnd, number of pages readahead
Definition: buf0buf.h:179
ulint lru_len
Length of buf_pool->LRU.
Definition: buf0buf.h:158
ulint unzip_cur
buf_LRU_stat_cur.unzip, num pages decompressed in current interval
Definition: buf0buf.h:220
ulint page_read_delta
num of pages read since last printout
Definition: buf0buf.h:197
double pages_written_rate
num of pages written per second
Definition: buf0buf.h:196
double page_made_young_rate
page made young rate in pages per second
Definition: buf0buf.h:190
ulint io_sum
buf_LRU_stat_sum.io
Definition: buf0buf.h:216
ulint io_cur
buf_LRU_stat_cur.io, num of IO for current interval
Definition: buf0buf.h:217
ulint old_lru_len
buf_pool->LRU_old_len
Definition: buf0buf.h:159
double pages_readahead_rnd_rate
random readahead rate in pages per second
Definition: buf0buf.h:205
ulint unzip_sum
buf_LRU_stat_sum.unzip
Definition: buf0buf.h:219
ulint n_pages_made_young
number of pages made young
Definition: buf0buf.h:173
ulint n_ra_pages_read
buf_pool->n_ra_pages_read, number of pages readahead
Definition: buf0buf.h:181
ulint n_pages_not_made_young
number of pages not made young
Definition: buf0buf.h:174
ulint n_ra_pages_evicted
buf_pool->n_ra_pages_evicted, number of readahead pages evicted without access
Definition: buf0buf.h:183
ulint n_pages_created
buf_pool->n_pages_created
Definition: buf0buf.h:176
ulint n_pending_flush_list
Pages pending flush in FLUSH LIST.
Definition: buf0buf.h:171
ulint n_page_get_delta
num of buffer pool page gets since last printout
Definition: buf0buf.h:186
ulint free_list_len
Length of buf_pool->free list.
Definition: buf0buf.h:160
double pages_evicted_rate
rate of readahead page evicted without access, in pages per second
Definition: buf0buf.h:209
ulint n_pending_flush_lru
Pages pending flush in LRU.
Definition: buf0buf.h:166
ulint not_young_making_delta
num of pages not make young since last printout
Definition: buf0buf.h:201
ulint n_pend_unzip
buf_pool->n_pend_unzip, pages pending decompress
Definition: buf0buf.h:162
double pages_readahead_rate
readahead rate in pages per second
Definition: buf0buf.h:207
ulint unzip_lru_len
length of buf_pool->unzip_LRU list
Definition: buf0buf.h:213
ulint pool_size
Buffer Pool size in pages.
Definition: buf0buf.h:157
ulint flush_list_len
Length of buf_pool->flush_list.
Definition: buf0buf.h:161
double page_not_made_young_rate
page not made young rate in pages per second
Definition: buf0buf.h:192
ulint n_page_gets
buf_pool->n_page_gets
Definition: buf0buf.h:178
double pages_read_rate
num of pages read per second
Definition: buf0buf.h:194
ulint n_pages_written
buf_pool->n_pages_written
Definition: buf0buf.h:177
ulint n_pending_flush_single_page
Pages pending to be flushed as part of single page flushes issued by various user threads.
Definition: buf0buf.h:167
The buffer pool statistics structure.
Definition: buf0buf.h:2044
uint64_t n_pages_not_made_young
Number of pages not made young because the first access was not long enough ago, in buf_page_peek_if_...
Definition: buf0buf.h:2077
std::atomic< uint64_t > n_pages_read
Number of read operations.
Definition: buf0buf.h:2053
std::atomic< uint64_t > n_pages_created
number of pages created in the pool with no read.
Definition: buf0buf.h:2059
uint64_t n_pages_made_young
Number of pages made young, in calls to buf_LRU_make_block_young().
Definition: buf0buf.h:2073
uint64_t n_ra_pages_evicted
Number of read ahead pages that are evicted without being accessed.
Definition: buf0buf.h:2069
std::atomic< uint64_t > n_ra_pages_read_rnd
Number of pages read in as part of random read ahead.
Definition: buf0buf.h:2062
uint64_t flush_list_bytes
Flush_list size in bytes.
Definition: buf0buf.h:2083
std::atomic< uint64_t > n_ra_pages_read
Number of pages read in as part of read ahead.
Definition: buf0buf.h:2065
Shards m_n_page_gets
Number of page gets performed; also successful searches through the adaptive hash index are counted a...
Definition: buf0buf.h:2050
uint64_t LRU_bytes
LRU size in bytes.
Definition: buf0buf.h:2080
void reset()
Definition: buf0buf.h:2109
static void copy(buf_pool_stat_t &dst, const buf_pool_stat_t &src) noexcept
Definition: buf0buf.h:2085
std::atomic< uint64_t > n_pages_written
Number of write operations.
Definition: buf0buf.h:2056
The buffer pool structure.
Definition: buf0buf.h:2152
UT_LIST_BASE_NODE_T(buf_buddy_free_t, list) zip_free[BUF_BUDDY_SIZES_MAX]
Buddy free lists.
UT_LIST_BASE_NODE_T(buf_page_t, list) free
Base node of the free block list.
buf_chunk_t * chunks_old
old buffer pool chunks to be freed after resizing buffer pool
Definition: buf0buf.h:2206
hash_table_t * zip_hash
Hash table of buf_block_t blocks whose frames are allocated to the zip buddy system,...
Definition: buf0buf.h:2225
FlushHp flush_hp
"Hazard pointer" used during scan of flush_list while doing flush list batch.
Definition: buf0buf.h:2259
buf_buddy_stat_t buddy_stat[BUF_BUDDY_SIZES_MAX+1]
Statistics of buddy system, indexed by block size.
Definition: buf0buf.h:2238
LRUItr single_scan_itr
Iterator used to scan the LRU list when searching for single page flushing victim.
Definition: buf0buf.h:2332
buf_pool_stat_t old_stat
Old statistics.
Definition: buf0buf.h:2244
void deallocate_chunk(buf_chunk_t *chunk)
A wrapper for buf_pool_t::allocator.deallocate_large which also advices the OS that this chunk can be...
Definition: buf0buf.cc:913
bool try_LRU_scan
Set to false when an LRU scan for free block fails.
Definition: buf0buf.h:2298
ulint buddy_n_frames
Number of frames allocated from the buffer pool to the buddy system.
Definition: buf0buf.h:2193
std::atomic< ulint > n_pend_reads
Number of pending read operations.
Definition: buf0buf.h:2228
UT_LIST_BASE_NODE_T(buf_page_t, list) zip_clean
Unmodified compressed pages.
UT_LIST_BASE_NODE_T(buf_page_t, list) flush_list
Base node of the modified block list.
LRUHp lru_hp
"hazard pointer" used during scan of LRU while doing LRU list batch.
Definition: buf0buf.h:2324
bool allocate_chunk(ulonglong mem_size, buf_chunk_t *chunk)
A wrapper for buf_pool_t::allocator.alocate_large which also advices the OS that this chunk should no...
Definition: buf0buf.cc:879
UT_LIST_BASE_NODE_T(buf_block_t, unzip_LRU) unzip_LRU
Base node of the unzip_LRU list.
std::chrono::steady_clock::time_point last_printout_time
when buf_print_io was last time called.
Definition: buf0buf.h:2234
ulint LRU_old_ratio
Reserve this much of the buffer pool for "old" blocks.
Definition: buf0buf.h:2189
BufListMutex chunks_mutex
protects (de)allocation of chunks:
Definition: buf0buf.h:2161
lsn_t track_page_lsn
Page Tracking start LSN.
Definition: buf0buf.h:2301
BufListMutex zip_free_mutex
buddy allocator mutex
Definition: buf0buf.h:2170
volatile ulint n_chunks
Number of buffer pool chunks.
Definition: buf0buf.h:2197
LRUItr lru_scan_itr
Iterator used to scan the LRU list when searching for replaceable victim.
Definition: buf0buf.h:2328
bool madvise_dump()
Advices the OS that all chunks in this buffer pool instance can be dumped to a core file.
Definition: buf0buf.cc:924
ulint old_size
Previous pool size in pages.
Definition: buf0buf.h:2212
UT_LIST_BASE_NODE_T(buf_page_t, LRU) LRU
Base node of the LRU list.
FlushHp oldest_hp
Entry pointer to scan the oldest page except for system temporary.
Definition: buf0buf.h:2262
buf_page_t * LRU_old
Pointer to the about LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV oldest blocks in the LRU list; NULL if LRU l...
Definition: buf0buf.h:2340
buf_page_t * watch
Sentinel records for buffer pool watches.
Definition: buf0buf.h:2369
BufListMutex flush_list_mutex
Mutex protecting the flush list access.
Definition: buf0buf.h:2255
ib_mutex_t flush_state_mutex
Flush state protection mutex.
Definition: buf0buf.h:2176
ib_rbt_t * flush_rbt
A red-black tree is used exclusively during recovery to speed up insertions in the flush_list.
Definition: buf0buf.h:2285
buf_chunk_t * chunks
buffer pool chunks
Definition: buf0buf.h:2203
ulint n_flush[BUF_FLUSH_N_TYPES]
This is the number of pending writes in the given flush type.
Definition: buf0buf.h:2273
BufListMutex LRU_list_mutex
LRU list mutex.
Definition: buf0buf.h:2164
hash_table_t * page_hash
Hash table of buf_page_t or buf_block_t file pages, buf_page_in_file() == true, indexed by (space_id,...
Definition: buf0buf.h:2221
ulint withdraw_target
Target length of withdraw block list, when withdrawing.
Definition: buf0buf.h:2320
os_event_t no_flush[BUF_FLUSH_N_TYPES]
This is in the set state when there is no flush batch of the given type running.
Definition: buf0buf.h:2277
buf_pool_stat_t stat
Current statistics.
Definition: buf0buf.h:2241
page_no_t read_ahead_area
Size in pages of the area which the read-ahead algorithms read if invoked.
Definition: buf0buf.h:2216
ulint curr_pool_size
Current pool size in bytes.
Definition: buf0buf.h:2186
bool init_flush[BUF_FLUSH_N_TYPES]
This is true when a flush of the given type is being initialized.
Definition: buf0buf.h:2269
ulint curr_size
Current pool size in pages.
Definition: buf0buf.h:2209
ulint instance_no
Array index of this buffer pool instance.
Definition: buf0buf.h:2183
ulint LRU_old_len
Length of the LRU list from the block to which LRU_old points onward, including that block; see buf0l...
Definition: buf0buf.h:2346
std::atomic< ulint > n_pend_unzip
number of pending decompressions.
Definition: buf0buf.h:2231
UT_LIST_BASE_NODE_T(buf_page_t, list) withdraw
base node of the withdraw block list.
lsn_t max_lsn_io
Maximum LSN for which write io has already started.
Definition: buf0buf.h:2304
ulint freed_page_clock
A sequence number used to count the number of buffer blocks removed from the end of the LRU list; NOT...
Definition: buf0buf.h:2292
BufListMutex free_list_mutex
free and withdraw list mutex
Definition: buf0buf.h:2167
BufPoolZipMutex zip_mutex
Zip mutex of this buffer pool instance, protects compressed only pages (of type buf_page_t,...
Definition: buf0buf.h:2180
bool madvise_dont_dump()
Advices the OS that all chunks in this buffer pool instance should not be dumped to a core file.
Definition: buf0buf.cc:934
volatile ulint n_chunks_new
New number of buffer pool chunks.
Definition: buf0buf.h:2200
BufListMutex zip_hash_mutex
zip_hash mutex
Definition: buf0buf.h:2173
The occupied bytes of lists in all buffer pools.
Definition: buf0buf.h:226
ulint LRU_bytes
LRU size in bytes.
Definition: buf0buf.h:227
ulint unzip_LRU_bytes
unzip_LRU size in bytes
Definition: buf0buf.h:228
ulint flush_list_bytes
flush_list size in bytes
Definition: buf0buf.h:229
Data structure for an index.
Definition: dict0mem.h:1021
File space address.
Definition: fil0fil.h:1139
Tablespace or log data space.
Definition: fil0fil.h:230
space_id_t id
Tablespace ID.
Definition: fil0fil.h:321
void dec_ref() noexcept
Decrement the page reference count.
Definition: fil0fil.h:283
uint32_t get_current_version() const
Returns current version of the space object.
Definition: fil0fil.cc:11753
uint32_t get_recent_version() const
Returns current version of the space object.
Definition: fil0fil.cc:11757
bool was_not_deleted() const
Definition: fil0fil.cc:11745
void inc_ref() noexcept
Increment the page reference count.
Definition: fil0fil.h:277
bool is_deleted() const
Definition: fil0fil.cc:11740
Red black tree instance.
Definition: ut0rbt.h:71
Mini-transaction handle and buffer.
Definition: mtr0mtr.h:181
InnoDB condition variable.
Definition: os0event.cc:62
Compressed page descriptor.
Definition: page0types.h:199
page_zip_t * data
Compressed page data.
Definition: page0types.h:201
The structure used in the spin lock implementation of a read-write lock.
Definition: sync0rw.h:361
Definition: ut0core.h:32
bool rw_lock_own(const rw_lock_t *lock, ulint lock_type)
Checks if the thread has locked the rw-lock in the specified mode, with the pass value == 0.
Definition: sync0rw.cc:816
rw_lock_type_t
Definition: sync0rw.h:93
@ RW_NO_LATCH
Definition: sync0rw.h:97
bool rw_lock_own_flagged(const rw_lock_t *lock, rw_lock_flags_t flags)
Checks if the thread has locked the rw-lock in the specified mode, with the pass value == 0.
Definition: sync0rw.cc:856
latch_level_t
Latching order levels.
Definition: sync0types.h:200
@ RW_LOCK_S
Definition: sync0types.h:207
@ RW_LOCK_X
Definition: sync0types.h:208
@ RW_LOCK_FLAG_X
Definition: sync0types.h:1218
@ RW_LOCK_FLAG_SX
Definition: sync0types.h:1219
@ RW_LOCK_FLAG_S
Definition: sync0types.h:1217
Version control for database, common definitions, and include files.
constexpr uint32_t UINT32_UNDEFINED
The 'undefined' value for a 32-bit unsigned integer.
Definition: univ.i:429
#define UNIV_PAGE_SIZE_SHIFT
The 2-logarithm of UNIV_PAGE_SIZE:
Definition: univ.i:292
#define IF_DEBUG(...)
Definition: univ.i:675
unsigned long int ulint
Definition: univ.i:407
#define UNIV_MEM_VALID(addr, size)
Definition: univ.i:592
constexpr uint32_t UNIV_ZIP_SIZE_MIN
Smallest compressed page size.
Definition: univ.i:331
Utilities for byte operations.
#define ut_ad(EXPR)
Debug assertion.
Definition: ut0dbg.h:68
#define ut_d(EXPR)
Debug statement.
Definition: ut0dbg.h:70
#define ut_a(EXPR)
Abort execution if EXPR does not evaluate to nonzero.
Definition: ut0dbg.h:56
void ut_list_validate(const List &list, Functor &functor)
Checks the consistency of a two-way list.
Definition: ut0lst.h:492
#define mutex_own(M)
Checks that the current thread owns the mutex.
Definition: ut0mutex.h:164
Various utilities.
static uint64_t lsn
Definition: xcom_base.cc:442
unsigned long id[MAX_DEAD]
Definition: xcom_base.cc:506
int n
Definition: xcom_base.cc:505