MySQL 8.0.39
Source Code Documentation
allocator.h
Go to the documentation of this file.
1/* Copyright (c) 2016, 2024, Oracle and/or its affiliates.
2
3This program is free software; you can redistribute it and/or modify it under
4the terms of the GNU General Public License, version 2.0, as published by the
5Free Software Foundation.
6
7This program is designed to work with certain software (including
8but not limited to OpenSSL) that is licensed under separate terms,
9as designated in a particular file or component or in included license
10documentation. The authors of MySQL hereby grant you an additional
11permission to link the program and your derivative works with the
12separately licensed software that they have either included with
13the program or referenced in the documentation.
14
15This program is distributed in the hope that it will be useful, but WITHOUT
16ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
18for more details.
19
20You should have received a copy of the GNU General Public License along with
21this program; if not, write to the Free Software Foundation, Inc.,
2251 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
23
24/** @file storage/temptable/include/temptable/allocator.h
25TempTable custom allocator. */
26
27#ifndef TEMPTABLE_ALLOCATOR_H
28#define TEMPTABLE_ALLOCATOR_H
29
30#include <algorithm> // std::max
31#include <cstddef> // size_t
32#include <limits> // std::numeric_limits
33#include <memory> // std::shared_ptr
34#include <new> // new
35#include <utility> // std::forward
36
37#include "my_dbug.h"
38#include "my_sys.h"
39#include "sql/mysqld.h" // temptable_max_ram, temptable_max_mmap
44
45namespace temptable {
46
47/* Thin abstraction which enables logging of memory operations.
48 *
49 * Used by the Allocator to implement switching from RAM to MMAP-backed
50 * allocations and vice-versa. E.g. Allocator will switch to MMAP-backed
51 * allocation strategy once temptable RAM-consumption threshold, which is
52 * defined by temptable_max_ram user-modifiable variable, is reached.
53 **/
55 struct RAM {
56 /** Log increments of heap-memory consumption.
57 *
58 * [in] Number of bytes.
59 * @return Heap-memory consumption after increase. */
60 static size_t increase(size_t bytes) {
61 assert(ram <= std::numeric_limits<decltype(bytes)>::max() - bytes);
62 return ram.fetch_add(bytes) + bytes;
63 }
64 /** Log decrements of heap-memory consumption.
65 *
66 * [in] Number of bytes.
67 * @return Heap-memory consumption after decrease. */
68 static size_t decrease(size_t bytes) {
69 assert(ram >= bytes);
70 return ram.fetch_sub(bytes) - bytes;
71 }
72 /** Get heap-memory threshold level. Level is defined by this Allocator.
73 *
74 * @return Heap-memory threshold. */
75 static size_t threshold() { return temptable_max_ram; }
76 /** Get current level of heap-memory consumption.
77 *
78 * @return Current level of heap-memory consumption (in bytes). */
79 static size_t consumption() { return ram; }
80 };
81
82 struct MMAP {
83 /** Log increments of MMAP-backed memory consumption.
84 *
85 * [in] Number of bytes.
86 * @return MMAP-memory consumption after increase. */
87 static size_t increase(size_t bytes) {
88 assert(mmap <= std::numeric_limits<decltype(bytes)>::max() - bytes);
89 return mmap.fetch_add(bytes) + bytes;
90 }
91 /** Log decrements of MMAP-backed memory consumption.
92 *
93 * [in] Number of bytes.
94 * @return MMAP-memory consumption after decrease. */
95 static size_t decrease(size_t bytes) {
96 assert(mmap >= bytes);
97 return mmap.fetch_sub(bytes) - bytes;
98 }
99 /** Get MMAP-backed memory threshold level. Level is defined by this
100 * Allocator.
101 *
102 * @return MMAP-memory threshold. */
103 static size_t threshold() {
104 if (temptable_use_mmap) {
105 return temptable_max_mmap;
106 } else {
107 return 0;
108 }
109 }
110 /** Get current level of MMAP-backed memory consumption.
111 *
112 * @return Current level of MMAP-backed memory consumption (in bytes). */
113 static size_t consumption() { return mmap; }
114 };
115
116 private:
117 /** Total bytes allocated so far by all threads in RAM/MMAP. */
118 static std::atomic<size_t> ram;
119 static std::atomic<size_t> mmap;
120};
121
122/* Thin abstraction which enables logging of how much resources have been
123 * consumed at the per-table level. Each temptable::Table will be composed
124 * of this type so that the temptable::Allocator through its policies can
125 * monitor its memory consumption and act appropriately when threshold
126 * is reached.
127 **/
129 public:
132
133 size_t increase(size_t bytes) {
134 assert(m_total_bytes <=
135 std::numeric_limits<decltype(bytes)>::max() - bytes);
136 m_total_bytes += bytes;
137 return m_total_bytes;
138 }
139 size_t decrease(size_t bytes) {
140 assert(m_total_bytes >= bytes);
141 m_total_bytes -= bytes;
142 return m_total_bytes;
143 }
  /* Per-table memory threshold (in its part an alias for tmp_table_size). */
 144 size_t threshold() { return m_threshold; }
  /* Current per-table memory consumption in bytes. */
 145 size_t consumption() { return m_total_bytes; }
146
147 private:
150};
151
152/* Allocation scheme, a type which controls allocation patterns in TempTable
153 * allocator.
154 *
155 * In particular, allocation scheme can define the behavior of TempTable
156 * allocator allocations with respect to the following:
157 * 1. Where each consecutive Block of memory is going to be allocated from
158 * (e.g. RAM vs MMAP vs etc.)
159 * 2. How big each consecutive Block of memory is going to be
160 * (e.g. monotonic growth, exponential growth, no growth, etc.)
161 *
162 * Concrete implementations of previous points must be provided through
163 * customization points, namely Block_size_policy and Block_source_policy,
164 * template type parameters. Whatever these types are, they must provide
165 * conforming interface implementations.
166 *
167 * Block_size_policy customization point must provide concrete implementation
168 * with the following signature:
169 * static size_t block_size(size_t, size_t);
170 * Similarly, concrete implementations of Block_source_policy must provide:
171 * static Source block_source(size_t);
172 *
173 * That allows us to build different concrete allocation schemes by simply
174 * composing different customization points. For example:
175 *
176 * using Monotonic_growth_RAM_only =
177 * Allocation_scheme<Monotonic_policy, RAM_only_policy>;
178 *
179 * using Exponential_growth_RAM_only =
180 * Allocation_scheme<Exponential_policy, RAM_only_policy>;
181 *
182 * using Exponential_growth_preferring_RAM_over_MMAP =
183 * Allocation_scheme<Exponential_policy, Prefer_RAM_over_MMAP_policy>;
184 *
185 * using No_growth_RAM_only =
186 * Allocation_scheme<No_growth_policy, RAM_only_policy>;
187 *
188 * etc. etc.
189 *
190 */
191template <typename Block_size_policy, typename Block_source_policy>
194 TableResourceMonitor *table_resource_monitor) {
195 return Block_source_policy::block_source(block_size,
196 table_resource_monitor);
197 }
198 static size_t block_size(size_t number_of_blocks, size_t n_bytes_requested) {
199 return Block_size_policy::block_size(number_of_blocks, n_bytes_requested);
200 }
201 static void block_freed(uint32_t block_size, Source block_source) {
202 Block_source_policy::block_freed(block_size, block_source);
203 }
204};
205
206/* Concrete implementation of Block_source_policy, a type which controls where
207 * TempTable allocator is going to be allocating next Block of memory from.
208 *
209 * In particular, this policy will make TempTable allocator:
210 * 1. Use RAM as long as temptable_max_ram threshold is not reached.
211 * 2. Start using MMAP when temptable_max_ram threshold is reached.
212 * 3. Go back using RAM as soon as RAM consumption drops below the
213 * temptable_max_ram threshold and there is enough space to accommodate the
214 * new block given the size.
215 * 4. Not take into account per-table memory limits defined through
216 * tmp_table_size SYSVAR.
217 * */
219 static Source block_source(uint32_t block_size,
220 TableResourceMonitor * = nullptr) {
222 if (MemoryMonitor::RAM::increase(block_size) <=
224 return Source::RAM;
225 } else {
227 }
228 }
230 if (MemoryMonitor::MMAP::increase(block_size) <=
232 return Source::MMAP_FILE;
233 } else {
235 }
236 }
238 }
239
240 static void block_freed(uint32_t block_size, Source block_source) {
241 if (block_source == Source::RAM) {
243 } else {
245 }
246 }
247};
248
249/* Another concrete implementation of Block_source_policy, a type which controls
250 * where TempTable allocator is going to be allocating next Block of memory
251 * from. It acts the same as Prefer_RAM_over_MMAP_policy with the main
252 * difference being that this policy obeys the per-table limit.
253 *
254 * What this means is that each temptable::Table is allowed to fit no more data
255 * than the given threshold controlled through TableResourceMonitor abstraction.
256 * TableResourceMonitor is a simple abstraction which is in its part an alias
257 * for tmp_table_size, a system variable that end MySQL users will be using to
258 * control this threshold.
259 *
260 * Updating the tmp_table_size threshold can only be done through the separate
261 * SET statement which implies that the tmp_table_size threshold cannot be
262 * updated during the duration of some query which is running within the same
263 * session. Separate sessions can still of course change this value to their
264 * liking.
265 * */
267 static Source block_source(uint32_t block_size,
268 TableResourceMonitor *table_resource_monitor) {
269 assert(table_resource_monitor);
270 assert(table_resource_monitor->consumption() <=
271 table_resource_monitor->threshold());
272
273 if (table_resource_monitor->consumption() + block_size >
274 table_resource_monitor->threshold())
276
278 }
279
280 static void block_freed(uint32_t block_size, Source block_source) {
282 }
283};
284
285/* Concrete implementation of Block_size_policy, a type which controls how big
286 * next Block of memory is going to be allocated by TempTable allocator.
287 *
288 * In particular, this policy will make TempTable allocator to grow the
289 * block-size at exponential rate with upper limit of ALLOCATOR_MAX_BLOCK_BYTES,
290 * which is 2 ^ ALLOCATOR_MAX_BLOCK_MB_EXP.
291 *
292 * E.g. allocation pattern may look like the following:
293 * 1 MiB,
294 * 2 MiB,
295 * 4 MiB,
296 * 8 MiB,
297 * 16 MiB,
298 * 32 MiB,
299 * ...,
300 * ALLOCATOR_MAX_BLOCK_BYTES,
301 * ALLOCATOR_MAX_BLOCK_BYTES
302 *
303 * In cases when block size that is being requested is bigger than the one which
304 * is calculated by this policy, requested block size will be returned (even if
305 * it grows beyond ALLOCATOR_MAX_BLOCK_BYTES).
306 * */
308 /** Given the current number of allocated blocks by the allocator, and number
309 * of bytes actually requested by the client code, calculate the new block
310 * size.
311 *
312 * [in] Current number of allocated blocks.
313 * [in] Number of bytes requested by the client code.
314 * @return New block size. */
315 static size_t block_size(size_t number_of_blocks, size_t n_bytes_requested) {
316 size_t block_size_hint;
317 if (number_of_blocks < ALLOCATOR_MAX_BLOCK_MB_EXP) {
318 block_size_hint = (1ULL << number_of_blocks) * 1_MiB;
319 } else {
320 block_size_hint = ALLOCATOR_MAX_BLOCK_BYTES;
321 }
322 return std::max(block_size_hint, Block::size_hint(n_bytes_requested));
323 }
324};
325
326/* This is a concrete allocation scheme which is going to be default one for
327 * TempTable allocator.
328 *
329 * It uses exponential growth policy and policy which prefers RAM allocations
330 * over MMAP allocations.
331 */
335
336/**
337 Shared state between all instances of a given allocator.
338
339 STL allocators can (since C++11) carry state; however, that state should
340 never be mutable, as the allocator can be copy-constructed and rebound
341 without further notice, so e.g. deallocating memory in one allocator could
342 mean freeing a block that an earlier copy of the allocator still thinks is
343 valid.
344
345 Usually, mutable state will be external to the allocator (e.g.
346 Mem_root_allocator will point to a MEM_ROOT, but it won't own the MEM_ROOT);
347 however, TempTable was never written this way, and doesn't have a natural
348 place to stick the allocator state. Thus, we need a kludge where the
349 allocator's state is held in a shared_ptr, owned by all the instances
350 together. This is suboptimal for performance, and also is against the style
351 guide's recommendation to have clear ownership of objects, but at least it
352 avoids the use-after-free.
353 */
354template <class AllocationScheme>
356 public:
357 /**
358 * Destroys the state, deallocate the current_block if it was left empty.
359 */
360 ~AllocatorState() noexcept {
361 if (!current_block.is_empty()) {
363 }
364 /* User must deallocate all data from all blocks, otherwise the memory will
365 * be leaked.
366 */
367 assert(number_of_blocks == 0);
368 }
369
370 /**
371 * Gets a Block from which a new allocation of the specified size should be
372 * performed. It will use the current Block or create a new one if it is too
373 * small.
374 * [in] Number of bytes that will be allocated from the returned block.
375 */
377 size_t n_bytes_requested, TableResourceMonitor *table_resource_monitor) {
378 if (current_block.is_empty() ||
379 !current_block.can_accommodate(n_bytes_requested)) {
380 /* The current_block may have been left empty during some deallocate()
381 * call. It is the last opportunity to free it before we lose reference to
382 * it.
383 */
384 if (!current_block.is_empty() &&
387 }
388
389 const size_t block_size =
390 AllocationScheme::block_size(number_of_blocks, n_bytes_requested);
392 block_size,
393 AllocationScheme::block_source(block_size, table_resource_monitor));
395 }
396 return &current_block;
397 }
398
399 /**
400 * Informs the state object of a block that has no data allocated inside of it
401 * anymore for garbage collection.
402 * [in] The empty block to manage and possibly free.
403 */
404 void block_is_not_used_anymore(Block block) noexcept {
405 if (block == current_block) {
406 /* Do nothing. Keep the last block alive. Some queries are repeatedly
407 * allocating one Row and freeing it, leading to constant allocation and
408 * deallocation of 1MB of memory for the current_block. Let's keep this
409 * block empty ready for a future use.
410 */
411 } else {
412 free_block(block);
413 }
414 }
415
416 private:
417 /**
418 * Frees the specified block and takes care of all accounting.
419 * [in] The empty block to free.
420 */
421 void free_block(Block &block) noexcept {
422 AllocationScheme::block_freed(block.size(), block.type());
423 block.destroy();
425 }
426
427 /** Current not-yet-full block to feed allocations from. */
429
430 /**
431 * Number of created blocks so far (by this Allocator object).
432 * We use this number only as a hint as to how big block to create when a
433 * new block needs to be created.
434 */
436};
437
438/** Custom memory allocator. All dynamic memory used by the TempTable engine
439 * is allocated through this allocator.
440 *
441 * The purpose of this allocator is to minimize the number of calls to the OS
442 * for allocating new memory (e.g. malloc()) and to improve the spatial
443 * locality of reference. It is able to do so quite easily thanks to the
444 * Block/Chunk entities it is implemented in terms of. Due to the design of
445 * these entities, it is also able to feed allocations and deallocations in
446 * (amortized) constant-time and keep being CPU memory-access friendly because
447 * of the internal self-adjustment to word-size memory alignment. To learn even
448 * more about specifics and more properties please have a look at the respective
449 * header files of Header/Block/Chunk class declarations.
450 *
451 * The most common use case, for which it is optimized,
452 * is to have the following performed by a single thread:
453 * - allocate many times (creation of a temp table and inserting data into it).
454 * - use the allocated memory (selects on the temp table).
455 * - free all the pieces (drop of the temp table).
456 *
457 * The allocator allocates memory from the OS in large blocks (e.g. a few MiB)
458 * whose size also increases progressively by the increasing number of
459 * allocation requests. Exact block-size increase progress is defined by the
460 * block allocation scheme which, by default, is set to
461 * AllocationScheme::Exponential.
462 *
463 * Allocator does not store a list of all allocated blocks but only keeps track
464 * of the current block which has not yet been entirely filled up and the
465 * overall number of allocated blocks. When current block gets filled up, new
466 * one is created and immediately made current.
467 *
468 * Furthermore, it always keeps the last block alive. It cannot be deallocated
469 * by the user. Last block is automatically deallocated at the thread exit.
470 *
471 * Allocator will also keep track of RAM-consumption and in case it reaches the
472 * threshold defined by temptable_max_ram, it will switch to MMAP-backed block
473 * allocations. It will switch back once RAM consumption is again below the
474 * threshold. */
475template <class T,
476 class AllocationScheme = Exponential_growth_preferring_RAM_over_MMAP>
478 static_assert(alignof(T) <= Block::ALIGN_TO,
479 "T's with alignment-requirement larger than "
480 "Block::ALIGN_TO are not supported.");
481 static_assert(sizeof(T) > 0, "Zero sized objects are not supported");
482
483 public:
484 typedef T *pointer;
485 typedef const T *const_pointer;
486 typedef T &reference;
487 typedef const T &const_reference;
488 typedef T value_type;
489 typedef size_t size_type;
490 typedef ptrdiff_t difference_type;
491
492 template <class U>
493 struct rebind {
495 };
496
497 /** Constructor. */
498 Allocator(Block *shared_block, TableResourceMonitor &table_resource_monitor);
499
500 /** Constructor from allocator of another type. The state is copied into the
501 * new object. */
502 template <class U>
503 Allocator(
504 /** [in] Source Allocator object. */
505 const Allocator<U> &other);
506
507 /** Move constructor from allocator of another type. */
508 template <class U>
509 Allocator(
510 /** [in,out] Source Allocator object. */
511 Allocator<U> &&other) noexcept;
512
513 /** Destructor. */
515
516 Allocator(const Allocator &) = default;
517
518 /** Assignment operator, not used, thus disabled. */
519 template <class U>
520 void operator=(const Allocator<U> &) = delete;
521
522 /** Move operator, not used, thus disabled. */
523 template <class U>
524 void operator=(const Allocator<U> &&) = delete;
525
526 /** Equality operator.
527 * @return true if equal */
528 template <class U>
529 bool operator==(
530 /** [in] Object to compare with. */
531 const Allocator<U> &rhs) const;
532
533 /** Inequality operator.
534 * @return true if not equal */
535 template <class U>
536 bool operator!=(
537 /** [in] Object to compare with. */
538 const Allocator<U> &rhs) const;
539
540 /** Allocate memory for storing `n_elements` number of elements. */
541 T *allocate(
542 /** [in] Number of elements that must be allocated. */
543 size_t n_elements);
544
545 /** Free a memory allocated by allocate(). */
546 void deallocate(
547 /** [in,out] Pointer to memory to free. */
548 T *ptr,
549 /** [in] Number of elements allocated. */
550 size_t n_elements);
551
552 /** Construct one object of type `U` on an already allocated chunk of memory,
553 * which must be large enough to store it. */
554 template <class U, class... Args>
555 void construct(
556 /** [in] Memory where to create the object. */
557 U *mem,
558 /** Arguments to pass to U's constructor. */
559 Args &&... args);
560
561 /** Destroy an object of type `U`. The memory is not returned to the OS, this
562 * is the counterpart of `construct()`. */
563 template <class U>
564 void destroy(
565 /** [in, out] Object to destroy. */
566 U *p);
567
568 /** Initialize necessary structures. Called once in the OS process lifetime,
569 * before other methods. */
570 static void init();
571
572 /**
573 Shared state between all the copies and rebinds of this allocator.
574 See AllocatorState for details.
575 */
576 std::shared_ptr<AllocatorState<AllocationScheme>> m_state;
577
578 /** A block of memory which is a state external to this allocator and can be
579 * shared among different instances of the allocator (not simultaneously). In
580 * order to speed up its operations, allocator may decide to consume the
581 * memory of this shared block.
582 */
584 /** Table resource monitor control mechanism that limits the amount of
585 * resources that can be consumed at the per-table level.
586 */
588};
589
590/* Implementation of inlined methods. */
591
592template <class T, class AllocationScheme>
594 Block *shared_block, TableResourceMonitor &table_resource_monitor)
595 : m_state(std::make_shared<AllocatorState<AllocationScheme>>()),
596 m_shared_block(shared_block),
597 m_table_resource_monitor(table_resource_monitor) {}
598
599template <class T, class AllocationScheme>
600template <class U>
602 : m_state(other.m_state),
603 m_shared_block(other.m_shared_block),
604 m_table_resource_monitor(other.m_table_resource_monitor) {}
605
606template <class T, class AllocationScheme>
607template <class U>
609 : m_state(std::move(other.m_state)),
610 m_shared_block(other.m_shared_block),
611 m_table_resource_monitor(other.m_table_resource_monitor) {}
612
613template <class T, class AllocationScheme>
615
616template <class T, class AllocationScheme>
617template <class U>
619 const Allocator<U> &) const {
620 return true;
621}
622
623template <class T, class AllocationScheme>
624template <class U>
626 const Allocator<U> &rhs) const {
627 return !(*this == rhs);
628}
629
630template <class T, class AllocationScheme>
631inline T *Allocator<T, AllocationScheme>::allocate(size_t n_elements) {
632 assert(n_elements <= std::numeric_limits<size_type>::max() / sizeof(T));
633 DBUG_EXECUTE_IF("temptable_allocator_oom", throw Result::OUT_OF_MEM;);
634 DBUG_EXECUTE_IF("temptable_allocator_record_file_full",
636
637 const size_t n_bytes_requested = n_elements * sizeof(T);
638 if (n_bytes_requested == 0) {
639 return nullptr;
640 }
641
642 Block *block;
643
644 if (m_shared_block && m_shared_block->is_empty()) {
645 const size_t block_size =
646 AllocationScheme::block_size(0, n_bytes_requested);
647 *m_shared_block = Block(
648 block_size,
649 AllocationScheme::block_source(block_size, &m_table_resource_monitor));
650 block = m_shared_block;
651 } else if (m_shared_block &&
652 m_shared_block->can_accommodate(n_bytes_requested)) {
653 block = m_shared_block;
654 } else {
655 block = m_state->get_block_for_new_allocation(n_bytes_requested,
656 &m_table_resource_monitor);
657 }
658
659 m_table_resource_monitor.increase(n_bytes_requested);
660
661 T *chunk_data =
662 reinterpret_cast<T *>(block->allocate(n_bytes_requested).data());
663 assert(reinterpret_cast<uintptr_t>(chunk_data) % alignof(T) == 0);
664 return chunk_data;
665}
666
667template <class T, class AllocationScheme>
669 size_t n_elements) {
670 assert(reinterpret_cast<uintptr_t>(chunk_data) % alignof(T) == 0);
671
672 if (chunk_data == nullptr) {
673 return;
674 }
675
676 const size_t n_bytes_requested = n_elements * sizeof(T);
677
678 Block block = Block(Chunk(chunk_data));
679 const auto remaining_chunks =
680 block.deallocate(Chunk(chunk_data), n_bytes_requested);
681 if (remaining_chunks == 0) {
682 if (m_shared_block && (block == *m_shared_block)) {
683 // Do nothing. Keep the last block alive.
684 } else {
685 m_state->block_is_not_used_anymore(block);
686 }
687 }
688 m_table_resource_monitor.decrease(n_bytes_requested);
689}
690
 691template <class T, class AllocationScheme>
 692template <class U, class... Args>
/* Placement-new a U into caller-provided storage; memory must already be
 * allocated (e.g. via allocate()) and large enough for a U. */
 693inline void Allocator<T, AllocationScheme>::construct(U *mem, Args &&... args) {
 694 new (mem) U(std::forward<Args>(args)...);
 695}
696
697template <class T, class AllocationScheme>
698template <class U>
700 p->~U();
701}
702
703template <class T, class AllocationScheme>
706}
707
708} /* namespace temptable */
709
710#endif /* TEMPTABLE_ALLOCATOR_H */
Block abstraction for temptable-allocator.
Chunk abstraction for temptable Block allocator.
Shared state between all instances of a given allocator.
Definition: allocator.h:355
void block_is_not_used_anymore(Block block) noexcept
Informs the state object of a block that has no data allocated inside of it anymore for garbage colle...
Definition: allocator.h:404
Block * get_block_for_new_allocation(size_t n_bytes_requested, TableResourceMonitor *table_resource_monitor)
Gets a Block from which a new allocation of the specified size should be performed.
Definition: allocator.h:376
size_t number_of_blocks
Number of created blocks so far (by this Allocator object).
Definition: allocator.h:435
Block current_block
Current not-yet-full block to feed allocations from.
Definition: allocator.h:428
void free_block(Block &block) noexcept
Frees the specified block and takes care of all accounting.
Definition: allocator.h:421
~AllocatorState() noexcept
Destroys the state, deallocate the current_block if it was left empty.
Definition: allocator.h:360
Custom memory allocator.
Definition: allocator.h:477
Block * m_shared_block
A block of memory which is a state external to this allocator and can be shared among different insta...
Definition: allocator.h:583
void construct(U *mem, Args &&... args)
Construct one object of type U on an already allocated chunk of memory, which must be large enough to...
Definition: allocator.h:693
T & reference
Definition: allocator.h:486
const T * const_pointer
Definition: allocator.h:485
void deallocate(T *ptr, size_t n_elements)
Free a memory allocated by allocate().
Definition: allocator.h:668
const T & const_reference
Definition: allocator.h:487
void operator=(const Allocator< U > &&)=delete
Move operator, not used, thus disabled.
Allocator(const Allocator &)=default
~Allocator()
Destructor.
Allocator(Block *shared_block, TableResourceMonitor &table_resource_monitor)
Constructor.
Definition: allocator.h:593
T * pointer
Definition: allocator.h:480
T * allocate(size_t n_elements)
Allocate memory for storing n_elements number of elements.
Definition: allocator.h:631
size_t size_type
Definition: allocator.h:489
void destroy(U *p)
Destroy an object of type U.
Definition: allocator.h:699
static void init()
Initialize necessary structures.
Definition: allocator.h:704
bool operator!=(const Allocator< U > &rhs) const
Inequality operator.
Definition: allocator.h:625
TableResourceMonitor & m_table_resource_monitor
Table resource monitor control mechanism that limits the amount of resources that can be consumed at ...
Definition: allocator.h:587
bool operator==(const Allocator< U > &rhs) const
Equality operator.
Definition: allocator.h:618
ptrdiff_t difference_type
Definition: allocator.h:490
std::shared_ptr< AllocatorState< AllocationScheme > > m_state
Shared state between all the copies and rebinds of this allocator.
Definition: allocator.h:576
T value_type
Definition: allocator.h:488
void operator=(const Allocator< U > &)=delete
Assignment operator, not used, thus disabled.
Memory-block abstraction whose purpose is to serve as a building block for custom memory-allocator im...
Definition: block.h:163
bool is_empty() const
Check if Block is empty (not holding any data).
Definition: block.h:399
bool can_accommodate(size_t chunk_size) const
Check if Block can fit (allocate) a Chunk of given size.
Definition: block.h:403
size_t number_of_used_chunks() const
Get current number of Chunks allocated by the Block.
Definition: block.h:427
static constexpr size_t ALIGN_TO
Block will self-adjust all requested allocation-sizes to the multiple of this value.
Definition: block.h:167
Chunk allocate(size_t chunk_size) noexcept
Allocate a Chunk from a Block.
Definition: block.h:351
static size_t size_hint(size_t n_bytes)
For given size, how much memory will Block with single Chunk actually occupy.
Definition: block.h:448
Chunk is an abstraction with the purpose of representing a smallest logical memory-unit within the Bl...
Definition: chunk.h:68
uint8_t * data() const
Get the pointer to the data section which will be provided to the end-user.
Definition: chunk.h:158
Definition: allocator.h:128
TableResourceMonitor(size_t threshold)
Definition: allocator.h:130
size_t m_total_bytes
Definition: allocator.h:149
size_t increase(size_t bytes)
Definition: allocator.h:133
size_t decrease(size_t bytes)
Definition: allocator.h:139
size_t m_threshold
Definition: allocator.h:148
size_t threshold()
Definition: allocator.h:144
size_t consumption()
Definition: allocator.h:145
const char * p
Definition: ctype-mb.cc:1237
#define U
Definition: ctype-tis620.cc:75
Memory utilities for temptable-allocator.
#define DBUG_EXECUTE_IF(keyword, a1)
Definition: my_dbug.h:171
Common header for many mysys elements.
ulonglong temptable_max_ram
Definition: mysqld.cc:1166
bool temptable_use_mmap
Definition: mysqld.cc:1168
ulonglong temptable_max_mmap
Definition: mysqld.cc:1167
Definition: gcs_xcom_synode.h:64
Definition: allocator.h:45
constexpr size_t ALLOCATOR_MAX_BLOCK_MB_EXP
log2(allocator max block size in MiB).
Definition: constants.h:60
void Block_PSI_init()
Initialize the PSI memory engine.
Definition: block.cc:74
constexpr size_t ALLOCATOR_MAX_BLOCK_BYTES
Limit on the size of a block created by Allocator (in bytes).
Definition: constants.h:65
Source
Type of memory allocated.
Definition: memutils.h:68
@ MMAP_FILE
Memory is allocated on disk, using mmap()'ed file.
@ RAM
Memory is allocated from RAM, using malloc() for example.
std::enable_if_t<!std::is_array< T >::value, std::shared_ptr< T > > make_shared(Args &&... args)
Dynamically allocates storage for an object of type T.
Definition: ut0new.h:2590
static MEM_ROOT mem
Definition: sql_servers.cc:99
TempTable constants.
Definition: allocator.h:192
static size_t block_size(size_t number_of_blocks, size_t n_bytes_requested)
Definition: allocator.h:198
static Source block_source(size_t block_size, TableResourceMonitor *table_resource_monitor)
Definition: allocator.h:193
static void block_freed(uint32_t block_size, Source block_source)
Definition: allocator.h:201
Definition: allocator.h:493
Allocator< U, AllocationScheme > other
Definition: allocator.h:494
Definition: allocator.h:307
static size_t block_size(size_t number_of_blocks, size_t n_bytes_requested)
Given the current number of allocated blocks by the allocator, and number of bytes actually requested...
Definition: allocator.h:315
Definition: allocator.h:82
static size_t increase(size_t bytes)
Log increments of MMAP-backed memory consumption.
Definition: allocator.h:87
static size_t threshold()
Get MMAP-backed memory threshold level.
Definition: allocator.h:103
static size_t decrease(size_t bytes)
Log decrements of MMAP-backed memory consumption.
Definition: allocator.h:95
static size_t consumption()
Get current level of MMAP-backed memory consumption.
Definition: allocator.h:113
Definition: allocator.h:55
static size_t consumption()
Get current level of heap-memory consumption.
Definition: allocator.h:79
static size_t decrease(size_t bytes)
Log decrements of heap-memory consumption.
Definition: allocator.h:68
static size_t increase(size_t bytes)
Log increments of heap-memory consumption.
Definition: allocator.h:60
static size_t threshold()
Get heap-memory threshold level.
Definition: allocator.h:75
Definition: allocator.h:54
static std::atomic< size_t > mmap
Definition: allocator.h:119
static std::atomic< size_t > ram
Total bytes allocated so far by all threads in RAM/MMAP.
Definition: allocator.h:118
static void block_freed(uint32_t block_size, Source block_source)
Definition: allocator.h:280
static Source block_source(uint32_t block_size, TableResourceMonitor *table_resource_monitor)
Definition: allocator.h:267
Definition: allocator.h:218
static void block_freed(uint32_t block_size, Source block_source)
Definition: allocator.h:240
static Source block_source(uint32_t block_size, TableResourceMonitor *=nullptr)
Definition: allocator.h:219
Definition: dtoa.cc:594