MySQL 8.3.0
Source Code Documentation
allocator.h
Go to the documentation of this file.
1/* Copyright (c) 2016, 2023, Oracle and/or its affiliates.
2
3This program is free software; you can redistribute it and/or modify it under
4the terms of the GNU General Public License, version 2.0, as published by the
5Free Software Foundation.
6
7This program is also distributed with certain software (including but not
8limited to OpenSSL) that is licensed under separate terms, as designated in a
9particular file or component or in included license documentation. The authors
10of MySQL hereby grant you an additional permission to link the program and
11your derivative works with the separately licensed software that they have
12included with MySQL.
13
14This program is distributed in the hope that it will be useful, but WITHOUT
15ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
16FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
17for more details.
18
19You should have received a copy of the GNU General Public License along with
20this program; if not, write to the Free Software Foundation, Inc.,
2151 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
22
23/** @file storage/temptable/include/temptable/allocator.h
24TempTable custom allocator. */
25
26#ifndef TEMPTABLE_ALLOCATOR_H
27#define TEMPTABLE_ALLOCATOR_H
28
29#include <algorithm> // std::max
30#include <cstddef> // size_t
31#include <limits> // std::numeric_limits
32#include <memory> // std::shared_ptr
33#include <new> // new
34#include <utility> // std::forward
35
36#include "my_dbug.h"
37#include "my_sys.h"
38#include "sql/mysqld.h" // temptable_max_ram, temptable_max_mmap
43
44namespace temptable {
45
46/* Thin abstraction which enables logging of memory operations.
47 *
48 * Used by the Allocator to implement switching from RAM to MMAP-backed
49 * allocations and vice-versa. E.g. Allocator will switch to MMAP-backed
50 * allocation strategy once temptable RAM-consumption threshold, which is
51 * defined by temptable_max_ram user-modifiable variable, is reached.
52 **/
54 struct RAM {
// NOTE(review): both methods operate on the global std::atomic<size_t>
// `ram` counter shared by all threads (declared at the bottom of the
// enclosing MemoryMonitor class).
55 /** Log increments of heap-memory consumption.
56 *
57 * [in] Number of bytes.
58 * @return Heap-memory consumption after increase. */
59 static size_t increase(size_t bytes) {
// Debug-only overflow guard; this load is not atomic with respect to
// the fetch_add below, so it is a sanity check, not synchronization.
60 assert(ram <= std::numeric_limits<decltype(bytes)>::max() - bytes);
// fetch_add returns the PREVIOUS value, hence "+ bytes" to report the
// post-increment total.
61 return ram.fetch_add(bytes) + bytes;
62 }
63 /** Log decrements of heap-memory consumption.
64 *
65 * [in] Number of bytes.
66 * @return Heap-memory consumption after decrease. */
67 static size_t decrease(size_t bytes) {
// Debug-only underflow guard (same non-atomic caveat as in increase()).
68 assert(ram >= bytes);
// fetch_sub returns the PREVIOUS value, hence "- bytes".
69 return ram.fetch_sub(bytes) - bytes;
70 }
71 /** Get heap-memory threshold level. Level is defined by this Allocator.
72 *
73 * @return Heap-memory threshold. */
// Threshold comes from the user-modifiable temptable_max_ram SYSVAR.
74 static size_t threshold() { return temptable_max_ram; }
75 /** Get current level of heap-memory consumption.
76 *
77 * @return Current level of heap-memory consumption (in bytes). */
78 static size_t consumption() { return ram; }
79 };
80
81 struct MMAP {
// NOTE(review): both methods operate on the global std::atomic<size_t>
// `mmap` counter shared by all threads (declared at the bottom of the
// enclosing MemoryMonitor class).
82 /** Log increments of MMAP-backed memory consumption.
83 *
84 * [in] Number of bytes.
85 * @return MMAP-memory consumption after increase. */
86 static size_t increase(size_t bytes) {
// Debug-only overflow guard; not atomic w.r.t. the fetch_add below.
87 assert(mmap <= std::numeric_limits<decltype(bytes)>::max() - bytes);
// fetch_add returns the PREVIOUS value, hence "+ bytes".
88 return mmap.fetch_add(bytes) + bytes;
89 }
90 /** Log decrements of MMAP-backed memory consumption.
91 *
92 * [in] Number of bytes.
93 * @return MMAP-memory consumption after decrease. */
94 static size_t decrease(size_t bytes) {
// Debug-only underflow guard (same caveat as in increase()).
95 assert(mmap >= bytes);
96 return mmap.fetch_sub(bytes) - bytes;
97 }
98 /** Get MMAP-backed memory threshold level. Level is defined by this
99 * Allocator.
100 *
101 * @return MMAP-memory threshold. */
102 static size_t threshold() {
// A threshold of 0 effectively disables MMAP-backed allocations when
// the temptable_use_mmap SYSVAR is turned off.
103 if (temptable_use_mmap) {
104 return temptable_max_mmap;
105 } else {
106 return 0;
107 }
108 }
109 /** Get current level of MMAP-backed memory consumption.
110 *
111 * @return Current level of MMAP-backed memory consumption (in bytes). */
112 static size_t consumption() { return mmap; }
113 };
114
115 private:
116 /** Total bytes allocated so far by all threads in RAM/MMAP. */
117 static std::atomic<size_t> ram;
118 static std::atomic<size_t> mmap;
119};
120
121/* Thin abstraction which enables logging of how much resources have been
122 * consumed at the per-table level. Each temptable::Table will be composed
123 * of this type so that the temptable::Allocator through its policies can
124 * monitor its memory consumption and act appropriately when threshold
125 * is reached.
126 **/
128 public:
131
/** Account `bytes` more consumed by this table.
 * [in] Number of bytes.
 * @return Per-table consumption after the increase. */
132 size_t increase(size_t bytes) {
// Debug-only overflow guard on the per-table byte counter. Counter is
// plain size_t, not atomic -- presumably a table is mutated by one
// thread at a time; TODO confirm against callers.
133 assert(m_total_bytes <=
134 std::numeric_limits<decltype(bytes)>::max() - bytes);
135 m_total_bytes += bytes;
136 return m_total_bytes;
137 }
/** Account `bytes` fewer consumed by this table.
 * [in] Number of bytes.
 * @return Per-table consumption after the decrease. */
138 size_t decrease(size_t bytes) {
// Debug-only underflow guard.
139 assert(m_total_bytes >= bytes);
140 m_total_bytes -= bytes;
141 return m_total_bytes;
142 }
/** @return Per-table threshold (per file docs, in part an alias for the
 * tmp_table_size SYSVAR). */
143 size_t threshold() { return m_threshold; }
/** @return Current per-table consumption in bytes. */
144 size_t consumption() { return m_total_bytes; }
145
146 private:
149};
150
151/* Allocation scheme, a type which controls allocation patterns in TempTable
152 * allocator.
153 *
154 * In particular, allocation scheme can define the behavior of TempTable
155 * allocator allocations with respect to the following:
156 * 1. Where each consecutive Block of memory is going to be allocated from
157 * (e.g. RAM vs MMAP vs etc.)
158 * 2. How big each consecutive Block of memory is going to be
159 * (e.g. monotonic growth, exponential growth, no growth, etc.)
160 *
161 * Concrete implementations of previous points must be provided through
162 * customization points, namely Block_size_policy and Block_source_policy,
163 * template type parameters. Whatever these types are, they must provide
164 * conforming interface implementations.
165 *
166 * Block_size_policy customization point must provide concrete implementation
167 * with the following signature:
168 * static size_t block_size(size_t, size_t);
169 * Similarly, concrete implementations of Block_source_policy must provide:
170 * static Source block_source(size_t);
171 *
172 * That allows us to build different concrete allocation schemes by simply
173 * composing different customization points. For example:
174 *
175 * using Monotonic_growth_RAM_only =
176 * Allocation_scheme<Monotonic_policy, RAM_only_policy>;
177 *
178 * using Exponential_growth_RAM_only =
179 * Allocation_scheme<Exponential_policy, RAM_only_policy>;
180 *
181 * using Exponential_growth_preferring_RAM_over_MMAP =
182 * Allocation_scheme<Exponential_policy, Prefer_RAM_over_MMAP_policy>;
183 *
184 * using No_growth_RAM_only =
185 * Allocation_scheme<No_growth_policy, RAM_only_policy>;
186 *
187 * etc. etc.
188 *
189 */
190template <typename Block_size_policy, typename Block_source_policy>
193 return Block_source_policy::block_source(block_size);
194 }
195 static size_t block_size(size_t number_of_blocks, size_t n_bytes_requested) {
196 return Block_size_policy::block_size(number_of_blocks, n_bytes_requested);
197 }
198};
199
200/* Concrete implementation of Block_source_policy, a type which controls where
201 * TempTable allocator is going to be allocating next Block of memory from.
202 *
203 * In particular, this policy will make TempTable allocator:
204 * 1. Use RAM as long as temptable_max_ram threshold is not reached.
205 * 2. Start using MMAP when temptable_max_ram threshold is reached.
206 * 3. Go back using RAM as soon as RAM consumption drops below the
207 * temptable_max_ram threshold and there is enough space to accommodate the
208 * new block given the size.
209 * 4. Not take into account per-table memory limits defined through
210 * tmp_table_size SYSVAR.
211 * */
213 static Source block_source(uint32_t block_size) {
215 if (MemoryMonitor::RAM::increase(block_size) <=
217 return Source::RAM;
218 } else {
220 }
221 }
223 if (MemoryMonitor::MMAP::increase(block_size) <=
225 return Source::MMAP_FILE;
226 } else {
228 }
229 }
231 }
232};
233
234/* Concrete implementation of Block_size_policy, a type which controls how big
235 * next Block of memory is going to be allocated by TempTable allocator.
236 *
237 * In particular, this policy will make TempTable allocator to grow the
238 * block-size at exponential rate with upper limit of ALLOCATOR_MAX_BLOCK_BYTES,
239 * which is 2 ^ ALLOCATOR_MAX_BLOCK_MB_EXP.
240 *
241 * E.g. allocation pattern may look like the following:
242 * 1 MiB,
243 * 2 MiB,
244 * 4 MiB,
245 * 8 MiB,
246 * 16 MiB,
247 * 32 MiB,
248 * ...,
249 * ALLOCATOR_MAX_BLOCK_BYTES,
250 * ALLOCATOR_MAX_BLOCK_BYTES
251 *
252 * In cases when block size that is being requested is bigger than the one which
253 * is calculated by this policy, requested block size will be returned (even if
254 * it grows beyond ALLOCATOR_MAX_BLOCK_BYTES).
255 * */
257 /** Given the current number of allocated blocks by the allocator, and number
258 * of bytes actually requested by the client code, calculate the new block
259 * size.
260 *
261 * [in] Current number of allocated blocks.
262 * [in] Number of bytes requested by the client code.
263 * @return New block size. */
264 static size_t block_size(size_t number_of_blocks, size_t n_bytes_requested) {
265 size_t block_size_hint;
// Exponential growth: 1 MiB, 2 MiB, 4 MiB, ..., capped at
// ALLOCATOR_MAX_BLOCK_BYTES (2 ^ ALLOCATOR_MAX_BLOCK_MB_EXP MiB).
266 if (number_of_blocks < ALLOCATOR_MAX_BLOCK_MB_EXP) {
267 block_size_hint = (1ULL << number_of_blocks) * 1_MiB;
268 } else {
269 block_size_hint = ALLOCATOR_MAX_BLOCK_BYTES;
270 }
// If a single request needs more than the hinted size, honor the request
// instead (Block::size_hint accounts for Block/Chunk bookkeeping
// overhead), even if that grows beyond ALLOCATOR_MAX_BLOCK_BYTES.
271 return std::max(block_size_hint, Block::size_hint(n_bytes_requested));
272 }
273};
274
275/* This is a concrete allocation scheme which is going to be default one for
276 * TempTable allocator.
277 *
278 * It uses exponential growth policy and policy which prefers RAM allocations
279 * over MMAP allocations.
280 */
283
284/**
285 Shared state between all instances of a given allocator.
286
287 STL allocators can (since C++11) carry state; however, that state should
288 never be mutable, as the allocator can be copy-constructed and rebound
289 without further notice, so e.g. deallocating memory in one allocator could
290 mean freeing a block that an earlier copy of the allocator still thinks is
291 valid.
292
293 Usually, mutable state will be external to the allocator (e.g.
294 Mem_root_allocator will point to a MEM_ROOT, but it won't own the MEM_ROOT);
295 however, TempTable was never written this way, and doesn't have a natural
296 place to stick the allocator state. Thus, we need a kludge where the
297 allocator's state is held in a shared_ptr, owned by all the instances
298 together. This is suboptimal for performance, and also is against the style
299 guide's recommendation to have clear ownership of objects, but at least it
300 avoids the use-after-free.
301 */
303 /** Current not-yet-full block to feed allocations from. */
305
306 /**
307 * Number of created blocks so far (by this Allocator object).
308 * We use this number only as a hint as to how big block to create when a
309 * new block needs to be created.
310 */
312};
313
314/** Custom memory allocator. All dynamic memory used by the TempTable engine
315 * is allocated through this allocator.
316 *
317 * The purpose of this allocator is to minimize the number of calls to the OS
318 * for allocating new memory (e.g. malloc()) and to improve the spatial
319 * locality of reference. It is able to do so quite easily thanks to the
320 * Block/Chunk entities it is implemented in terms of. Due to the design of
321 * these entities, it is also able to feed allocations and deallocations in
322 * (amortized) constant-time and keep being CPU memory-access friendly because
323 * of the internal self-adjustment to word-size memory alignment. To learn even
324 * more about specifics and more properties please have a look at the respective
325 * header files of Header/Block/Chunk class declarations.
326 *
327 * The most common use case, for which it is optimized,
328 * is to have the following performed by a single thread:
329 * - allocate many times (creation of a temp table and inserting data into it).
330 * - use the allocated memory (selects on the temp table).
331 * - free all the pieces (drop of the temp table).
332 *
333 * The allocator allocates memory from the OS in large blocks (e.g. a few MiB)
334 * whose size also increases progressively by the increasing number of
335 * allocation requests. Exact block-size increase progress is defined by the
336 * block allocation scheme which, by default, is set to
337 * AllocationScheme::Exponential.
338 *
339 * Allocator does not store a list of all allocated blocks but only keeps track
340 * of the current block which has not yet been entirely filled up and the
341 * overall number of allocated blocks. When current block gets filled up, new
342 * one is created and immediately made current.
343 *
344 * Furthermore, it always keeps the last block alive. It cannot be deallocated
345 * by the user. Last block is automatically deallocated at the thread exit.
346 *
347 * Allocator will also keep track of RAM-consumption and in case it reaches the
348 * threshold defined by temptable_max_ram, it will switch to MMAP-backed block
349 * allocations. It will switch back once RAM consumption is again below the
350 * threshold. */
351template <class T,
352 class AllocationScheme = Exponential_growth_preferring_RAM_over_MMAP>
354 static_assert(alignof(T) <= Block::ALIGN_TO,
355 "T's with alignment-requirement larger than "
356 "Block::ALIGN_TO are not supported.");
357 static_assert(sizeof(T) > 0, "Zero sized objects are not supported");
358
359 public:
360 typedef T *pointer;
361 typedef const T *const_pointer;
362 typedef T &reference;
363 typedef const T &const_reference;
364 typedef T value_type;
365 typedef size_t size_type;
366 typedef ptrdiff_t difference_type;
367
368 template <class U>
369 struct rebind {
371 };
372
373 /** Constructor. */
374 Allocator(Block *shared_block, TableResourceMonitor &table_resource_monitor);
375
376 /** Constructor from allocator of another type. The state is copied into the
377 * new object. */
378 template <class U>
379 Allocator(
380 /** [in] Source Allocator object. */
381 const Allocator<U> &other);
382
383 /** Move constructor from allocator of another type. */
384 template <class U>
385 Allocator(
386 /** [in,out] Source Allocator object. */
387 Allocator<U> &&other) noexcept;
388
389 /** Destructor. */
391
392 Allocator(const Allocator &) = default;
393
394 /** Assignment operator, not used, thus disabled. */
395 template <class U>
396 void operator=(const Allocator<U> &) = delete;
397
398 /** Move operator, not used, thus disabled. */
399 template <class U>
400 void operator=(const Allocator<U> &&) = delete;
401
402 /** Equality operator.
403 * @return true if equal */
404 template <class U>
405 bool operator==(
406 /** [in] Object to compare with. */
407 const Allocator<U> &rhs) const;
408
409 /** Inequality operator.
410 * @return true if not equal */
411 template <class U>
412 bool operator!=(
413 /** [in] Object to compare with. */
414 const Allocator<U> &rhs) const;
415
416 /** Allocate memory for storing `n_elements` number of elements. */
417 T *allocate(
418 /** [in] Number of elements that must be allocated. */
419 size_t n_elements);
420
421 /** Free a memory allocated by allocate(). */
422 void deallocate(
423 /** [in,out] Pointer to memory to free. */
424 T *ptr,
425 /** [in] Number of elements allocated. */
426 size_t n_elements);
427
428 /** Construct one object of type `U` on an already allocated chunk of memory,
429 * which must be large enough to store it. */
430 template <class U, class... Args>
431 void construct(
432 /** [in] Memory where to create the object. */
433 U *mem,
434 /** Arguments to pass to U's constructor. */
435 Args &&... args);
436
437 /** Destroy an object of type `U`. The memory is not returned to the OS, this
438 * is the counterpart of `construct()`. */
439 template <class U>
440 void destroy(
441 /** [in, out] Object to destroy. */
442 U *p);
443
444 /** Initialize necessary structures. Called once in the OS process lifetime,
445 * before other methods. */
446 static void init();
447
448 /**
449 Shared state between all the copies and rebinds of this allocator.
450 See AllocatorState for details.
451 */
452 std::shared_ptr<AllocatorState> m_state;
453
454 /** A block of memory which is a state external to this allocator and can be
455 * shared among different instances of the allocator (not simultaneously). In
456 * order to speed up its operations, allocator may decide to consume the
457 * memory of this shared block.
458 */
460 /** Table resource monitor control mechanism that limits the amount of
461 * resources that can be consumed at the per-table level.
462 */
464};
465
466/* Implementation of inlined methods. */
467
468template <class T, class AllocationScheme>
470 Block *shared_block, TableResourceMonitor &table_resource_monitor)
471 : m_state(std::make_shared<AllocatorState>()),
472 m_shared_block(shared_block),
473 m_table_resource_monitor(table_resource_monitor) {}
474
475template <class T, class AllocationScheme>
476template <class U>
478 : m_state(other.m_state),
479 m_shared_block(other.m_shared_block),
480 m_table_resource_monitor(other.m_table_resource_monitor) {}
481
482template <class T, class AllocationScheme>
483template <class U>
485 : m_state(std::move(other.m_state)),
486 m_shared_block(other.m_shared_block),
487 m_table_resource_monitor(other.m_table_resource_monitor) {}
488
489template <class T, class AllocationScheme>
491
492template <class T, class AllocationScheme>
493template <class U>
495 const Allocator<U> &) const {
496 return true;
497}
498
499template <class T, class AllocationScheme>
500template <class U>
502 const Allocator<U> &rhs) const {
503 return !(*this == rhs);
504}
505
506template <class T, class AllocationScheme>
/** Allocate uninitialized storage for `n_elements` objects of type T.
 *
 * Feeds the allocation from, in order of preference: the shared block
 * (set up on demand, or reused if it can accommodate the request), the
 * current not-yet-full block, or a newly created block whose size and
 * source (RAM vs MMAP) are decided by AllocationScheme.
 *
 * NOTE(review): this is a doxygen rendering; original source lines 511
 * (the record_file_full debug-injection body) and 555 (the action taken
 * when the per-table threshold is exceeded -- presumably a throw) are
 * not visible here. Consult the original file before editing. */
507inline T *Allocator<T, AllocationScheme>::allocate(size_t n_elements) {
// Guard against size_t overflow in n_elements * sizeof(T) below.
508 assert(n_elements <= std::numeric_limits<size_type>::max() / sizeof(T));
// Debug-build failure-injection points.
509 DBUG_EXECUTE_IF("temptable_allocator_oom", throw Result::OUT_OF_MEM;);
510 DBUG_EXECUTE_IF("temptable_allocator_record_file_full",
512
513 const size_t n_bytes_requested = n_elements * sizeof(T);
// Zero-sized requests yield nullptr rather than a zero-byte chunk.
514 if (n_bytes_requested == 0) {
515 return nullptr;
516 }
517
518 Block *block;
519
// Case 1: shared block exists but is not yet backed by memory -- back it
// with a freshly sized/sourced Block and allocate from it.
520 if (m_shared_block && m_shared_block->is_empty()) {
521 const size_t block_size =
522 AllocationScheme::block_size(0, n_bytes_requested);
523 *m_shared_block =
524 Block(block_size, AllocationScheme::block_source(block_size));
525 block = m_shared_block;
// Case 2: shared block already has room for this request.
526 } else if (m_shared_block &&
527 m_shared_block->can_accommodate(n_bytes_requested)) {
528 block = m_shared_block;
// Case 3: current block is absent or full -- create a new one, sized by
// the allocation scheme (which grows with number_of_blocks).
529 } else if (m_state->current_block.is_empty() ||
530 !m_state->current_block.can_accommodate(n_bytes_requested)) {
531 const size_t block_size = AllocationScheme::block_size(
532 m_state->number_of_blocks, n_bytes_requested);
533 m_state->current_block =
534 Block(block_size, AllocationScheme::block_source(block_size));
535 block = &m_state->current_block;
536 ++m_state->number_of_blocks;
// Case 4: current block can serve the request as-is.
537 } else {
538 block = &m_state->current_block;
539 }
540
541 /* temptable::Table is allowed to fit no more data than the given threshold
542 * controlled through TableResourceMonitor abstraction. TableResourceMonitor
543 * is a simple abstraction which is in its part an alias for tmp_table_size, a
544 * system variable that end MySQL users will be using to control this
545 * threshold.
546 *
547 * Updating the tmp_table_size threshold can only be done through the separate
548 * SET statement which implies that the tmp_table_size threshold cannot be
549 * updated during the duration of some query which is running within the same
550 * session. Separate sessions can still of course change this value to their
551 * liking.
552 */
553 if (m_table_resource_monitor.consumption() + n_bytes_requested >
554 m_table_resource_monitor.threshold()) {
556 }
557 m_table_resource_monitor.increase(n_bytes_requested);
558
// Carve a Chunk out of the chosen block; alignment is provided by
// Block's internal ALIGN_TO self-adjustment (asserted below).
559 T *chunk_data =
560 reinterpret_cast<T *>(block->allocate(n_bytes_requested).data());
561 assert(reinterpret_cast<uintptr_t>(chunk_data) % alignof(T) == 0);
562 return chunk_data;
563}
564
565template <class T, class AllocationScheme>
/** Return storage previously obtained from allocate(). When a block's
 * last chunk is released, the block itself is destroyed and the global
 * RAM/MMAP accounting is decreased -- except for the shared block, which
 * is deliberately kept alive.
 *
 * NOTE(review): doxygen rendering -- original source line 566 (the
 * signature line naming the `chunk_data` parameter) is not visible
 * here. */
567 size_t n_elements) {
568 assert(reinterpret_cast<uintptr_t>(chunk_data) % alignof(T) == 0);
569
// Mirrors allocate(): zero-sized requests were handed out as nullptr.
570 if (chunk_data == nullptr) {
571 return;
572 }
573
574 const size_t n_bytes_requested = n_elements * sizeof(T);
575
// Reconstruct the owning Block handle from the Chunk; presumably Block
// metadata is reachable from the chunk pointer -- see block.h/chunk.h.
576 Block block = Block(Chunk(chunk_data));
577 const auto remaining_chunks =
578 block.deallocate(Chunk(chunk_data), n_bytes_requested);
579 if (remaining_chunks == 0) {
580 if (m_shared_block && (block == *m_shared_block)) {
581 // Do nothing. Keep the last block alive.
582 } else {
583 assert(m_state->number_of_blocks > 0);
// Update global RAM/MMAP accounting before destroying the block.
584 if (block.type() == Source::RAM) {
585 MemoryMonitor::RAM::decrease(block.size());
586 } else {
587 MemoryMonitor::MMAP::decrease(block.size());
588 }
// Destroy through m_state->current_block when it is the same block --
// presumably so the cached current_block handle is reset too; confirm
// against Block::destroy() semantics in block.h.
589 if (block == m_state->current_block) {
590 m_state->current_block.destroy();
591 } else {
592 block.destroy();
593 }
594 --m_state->number_of_blocks;
595 }
596 }
597 m_table_resource_monitor.decrease(n_bytes_requested);
598}
599
600template <class T, class AllocationScheme>
601template <class U, class... Args>
/** Construct a U in already-allocated storage `mem` via placement-new,
 * perfect-forwarding the constructor arguments. Performs no allocation;
 * counterpart of destroy(). */
602inline void Allocator<T, AllocationScheme>::construct(U *mem, Args &&... args) {
603 new (mem) U(std::forward<Args>(args)...);
604}
605
606template <class T, class AllocationScheme>
607template <class U>
609 p->~U();
610}
611
612template <class T, class AllocationScheme>
615}
616
617} /* namespace temptable */
618
619#endif /* TEMPTABLE_ALLOCATOR_H */
Block abstraction for temptable-allocator.
Chunk abstraction for temptable Block allocator.
Custom memory allocator.
Definition: allocator.h:353
Block * m_shared_block
A block of memory which is a state external to this allocator and can be shared among different insta...
Definition: allocator.h:459
void construct(U *mem, Args &&... args)
Construct one object of type U on an already allocated chunk of memory, which must be large enough to...
Definition: allocator.h:602
T & reference
Definition: allocator.h:362
const T * const_pointer
Definition: allocator.h:361
void deallocate(T *ptr, size_t n_elements)
Free a memory allocated by allocate().
Definition: allocator.h:566
const T & const_reference
Definition: allocator.h:363
std::shared_ptr< AllocatorState > m_state
Shared state between all the copies and rebinds of this allocator.
Definition: allocator.h:452
void operator=(const Allocator< U > &&)=delete
Move operator, not used, thus disabled.
Allocator(const Allocator &)=default
~Allocator()
Destructor.
Allocator(Block *shared_block, TableResourceMonitor &table_resource_monitor)
Constructor.
Definition: allocator.h:469
T * pointer
Definition: allocator.h:356
T * allocate(size_t n_elements)
Allocate memory for storing n_elements number of elements.
Definition: allocator.h:507
size_t size_type
Definition: allocator.h:365
void destroy(U *p)
Destroy an object of type U.
Definition: allocator.h:608
static void init()
Initialize necessary structures.
Definition: allocator.h:613
bool operator!=(const Allocator< U > &rhs) const
Inequality operator.
Definition: allocator.h:501
TableResourceMonitor & m_table_resource_monitor
Table resource monitor control mechanism that limits the amount of resources that can be consumed at ...
Definition: allocator.h:463
bool operator==(const Allocator< U > &rhs) const
Equality operator.
Definition: allocator.h:494
ptrdiff_t difference_type
Definition: allocator.h:366
T value_type
Definition: allocator.h:364
void operator=(const Allocator< U > &)=delete
Assignment operator, not used, thus disabled.
Memory-block abstraction whose purpose is to serve as a building block for custom memory-allocator im...
Definition: block.h:162
static constexpr size_t ALIGN_TO
Block will self-adjust all requested allocation-sizes to the multiple of this value.
Definition: block.h:166
Chunk allocate(size_t chunk_size) noexcept
Allocate a Chunk from a Block.
Definition: block.h:350
static size_t size_hint(size_t n_bytes)
For given size, how much memory will Block with single Chunk actually occupy.
Definition: block.h:447
Chunk is an abstraction with the purpose of representing a smallest logical memory-unit within the Bl...
Definition: chunk.h:67
uint8_t * data() const
Get the pointer to the data section which will be provided to the end-user.
Definition: chunk.h:157
Definition: allocator.h:127
TableResourceMonitor(size_t threshold)
Definition: allocator.h:129
size_t m_total_bytes
Definition: allocator.h:148
size_t increase(size_t bytes)
Definition: allocator.h:132
size_t decrease(size_t bytes)
Definition: allocator.h:138
size_t m_threshold
Definition: allocator.h:147
size_t threshold()
Definition: allocator.h:143
size_t consumption()
Definition: allocator.h:144
const char * p
Definition: ctype-mb.cc:1234
#define U
Definition: ctype-tis620.cc:73
Memory utilities for temptable-allocator.
#define DBUG_EXECUTE_IF(keyword, a1)
Definition: my_dbug.h:170
Common header for many mysys elements.
ulonglong temptable_max_ram
Definition: mysqld.cc:1182
bool temptable_use_mmap
Definition: mysqld.cc:1184
ulonglong temptable_max_mmap
Definition: mysqld.cc:1183
Definition: varlen_sort.h:174
Definition: allocator.h:44
constexpr size_t ALLOCATOR_MAX_BLOCK_MB_EXP
log2(allocator max block size in MiB).
Definition: constants.h:59
void Block_PSI_init()
Initialize the PSI memory engine.
Definition: block.cc:73
constexpr size_t ALLOCATOR_MAX_BLOCK_BYTES
Limit on the size of a block created by Allocator (in bytes).
Definition: constants.h:64
Source
Type of memory allocated.
Definition: memutils.h:67
@ MMAP_FILE
Memory is allocated on disk, using mmap()'ed file.
@ RAM
Memory is allocated from RAM, using malloc() for example.
std::enable_if_t<!std::is_array< T >::value, std::shared_ptr< T > > make_shared(Args &&... args)
Dynamically allocates storage for an object of type T.
Definition: ut0new.h:2589
static MEM_ROOT mem
Definition: sql_servers.cc:99
TempTable constants.
Definition: allocator.h:191
static size_t block_size(size_t number_of_blocks, size_t n_bytes_requested)
Definition: allocator.h:195
static Source block_source(size_t block_size)
Definition: allocator.h:192
Shared state between all instances of a given allocator.
Definition: allocator.h:302
Block current_block
Current not-yet-full block to feed allocations from.
Definition: allocator.h:304
size_t number_of_blocks
Number of created blocks so far (by this Allocator object).
Definition: allocator.h:311
Definition: allocator.h:369
Allocator< U, AllocationScheme > other
Definition: allocator.h:370
Definition: allocator.h:256
static size_t block_size(size_t number_of_blocks, size_t n_bytes_requested)
Given the current number of allocated blocks by the allocator, and number of bytes actually requested...
Definition: allocator.h:264
Definition: allocator.h:81
static size_t increase(size_t bytes)
Log increments of MMAP-backed memory consumption.
Definition: allocator.h:86
static size_t threshold()
Get MMAP-backed memory threshold level.
Definition: allocator.h:102
static size_t decrease(size_t bytes)
Log decrements of MMAP-backed memory consumption.
Definition: allocator.h:94
static size_t consumption()
Get current level of MMAP-backed memory consumption.
Definition: allocator.h:112
Definition: allocator.h:54
static size_t consumption()
Get current level of heap-memory consumption.
Definition: allocator.h:78
static size_t decrease(size_t bytes)
Log decrements of heap-memory consumption.
Definition: allocator.h:67
static size_t increase(size_t bytes)
Log increments of heap-memory consumption.
Definition: allocator.h:59
static size_t threshold()
Get heap-memory threshold level.
Definition: allocator.h:74
Definition: allocator.h:53
static std::atomic< size_t > mmap
Definition: allocator.h:118
static std::atomic< size_t > ram
Total bytes allocated so far by all threads in RAM/MMAP.
Definition: allocator.h:117
Definition: allocator.h:212
static Source block_source(uint32_t block_size)
Definition: allocator.h:213
Definition: dtoa.cc:588