MySQL 9.5.0
Source Code Documentation
access_path.h
Go to the documentation of this file.
1/* Copyright (c) 2020, 2025, Oracle and/or its affiliates.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License, version 2.0,
5 as published by the Free Software Foundation.
6
7 This program is designed to work with certain software (including
8 but not limited to OpenSSL) that is licensed under separate terms,
9 as designated in a particular file or component or in included license
10 documentation. The authors of MySQL hereby grant you an additional
11 permission to link the program and your derivative works with the
12 separately licensed software that they have either included with
13 the program or referenced in the documentation.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License, version 2.0, for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
23
24#ifndef SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
25#define SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
26
27#include <assert.h>
28#include <stdint.h>
29#include <cmath>
30#include <cstddef>
31#include <span>
32#include <type_traits>
33#include <utility>
34#include <vector>
35
36#include "my_alloc.h"
37#include "my_base.h"
38#include "my_table_map.h"
39#include "sql/item.h"
40// IWYU suggests removing row_iterator.h, but then the inlined short form of
41// CreateIteratorFromAccessPath() fails to compile. So use a pragma to keep it.
42#include "sql/iterators/row_iterator.h" // IWYU pragma: keep
47#include "sql/join_type.h"
48#include "sql/mem_root_array.h"
49#include "sql/olap.h"
50#include "sql/sql_class.h"
51#include "sql/table.h"
52
54class Filesort;
56class Item_func_match;
57class JOIN;
58class KEY;
60class QEP_TAB;
61class QUICK_RANGE;
62class SJ_TMP_TABLE;
63class Table_function;
65class Window;
66struct AccessPath;
69struct Index_lookup;
70struct KEY_PART;
71struct POSITION;
73
74/**
75 A specification that two specific relational expressions
76 (e.g., two tables, or a table and a join between two other tables)
77 should be joined together. The actual join conditions, if any,
78 live inside the “expr” object, as does the join type etc.
79 */
83
84 // If this join is made using a hash join, estimates the width
85 // of each row as stored in the hash table, in bytes.
87
88 // The set of (additional) functional dependencies that are active
89 // after this join predicate has been applied. E.g. if we're joining
90 // on t1.x = t2.x, there will be a bit for that functional dependency.
91 // We don't currently support more complex join conditions, but there's
92 // no conceptual reason why we couldn't, e.g. a join on a = b + c
93 // could give rise to the FD {b, c} → a and possibly even {a, b} → c
94 // or {a, c} → b.
95 //
96 // Used in the processing of interesting orders.
98
99 // A less compact form of functional_dependencies, used during building
100 // (FunctionalDependencySet bitmaps are only available after all functional
101 // indexes have been collected and Build() has been called).
103
104 // A semijoin on the following format:
105 //
106 // SELECT ... FROM t1 WHERE EXISTS
107 // (SELECT ... FROM t2 WHERE t1.f1=t2.f2 AND t1.f3=t2.f4 ... t1.fn=t2.fm)
108 //
109 // may be transformed into an equivalent inner join:
110 //
111 // SELECT ... FROM (SELECT DISTINCT f2, f4...fm FROM t2) d JOIN t1
112 // ON t1.f1=d.f2 AND t1.f3=d.f4 ... t1.fn=d.fm
113 //
 114 // If this is a suitable semijoin: This field will identify the
115 // grouping given by (f2, f4..fm). (@see
116 // LogicalOrderings::RemapOrderingIndex() for a description of how this
117 // value can be mapped to an actual ordering). The join
118 // optimizer will then consider deduplicating on it and applying the
119 // above transform. If no such grouping was found, this field will be -1.
121
122 // Same as ordering_idx_needed_for_semijoin_rewrite, but given to the
123 // RemoveDuplicatesIterator for doing the actual grouping. Allocated
124 // on the MEM_ROOT. Can be empty, in which case a LIMIT 1 would do.
125 std::span<Item *> semijoin_group{};
126};
127
128/**
129 A filter of some sort that is not a join condition (those are stored
130 in JoinPredicate objects). AND conditions are typically split up into
131 multiple Predicates.
132 */
133struct Predicate {
135
136 // condition->used_tables(), converted to a NodeMap.
138
139 // tables referred to by the condition, plus any tables whose values
140 // can null any of those tables. (Even when reordering outer joins,
141 // at least one of those tables will still be present on the
142 // left-hand side of the outer join, so this is sufficient.)
143 //
144 // As a special case, we allow setting RAND_TABLE_BIT, even though it
145 // is normally part of a table_map, not a NodeMap.
147
149
150 // Whether this predicate is a join condition after all; it was promoted
151 // to a WHERE predicate since it was part of a cycle (see the comment in
152 // AddCycleEdges()). If it is, it is usually ignored so that we don't
153 // double-apply join conditions -- but if the join in question was not
154 // applied (because the cycle was broken at this point), the predicate
155 // would come into play. This is normally registered on the join itself
156 // (see RelationalExpression::join_predicate_bitmap), but having the bit
157 // on the predicate itself is used to avoid trying to push it down as a
158 // sargable predicate.
159 bool was_join_condition = false;
160
161 // Whether this predicate references tables that could be NULL-complemented
162 // later by an outer join. This could for example be true for degenerate outer
163 // join conditions that are pushed down as a table filter on one of the inner
164 // tables, or for join conditions in inner joins that are on the inner side of
165 // an outer join.
166 //
167 // We keep track of this here in order to prevent collection of functional
168 // dependencies from such predicates if the functional dependencies are not
169 // valid after the outer join.
171
172 // If this is a join condition that came from a multiple equality,
173 // and we have decided to create a mesh from that multiple equality,
174 // returns the index of it into the “multiple_equalities” array
175 // in MakeJoinHypergraph(). (You don't actually need the array to
176 // use this; it's just an opaque index to deduplicate between different
177 // predicates.) Otherwise, -1.
179
180 // See the equivalent fields in JoinPredicate.
183
184 // The list of all subqueries referred to in this predicate, if any.
185 // The optimizer uses this to add their materialized/non-materialized
186 // costs when evaluating filters.
188};
189
193};
194
195/// To indicate that a row estimate is not yet made.
196inline constexpr double kUnknownRowCount = -1.0;
197
198/// To indicate that a cost estimate is not yet made. We use a large negative
199/// value to avoid getting a positive result if we by mistake add this to
200/// a real (positive) cost.
201inline constexpr double kUnknownCost = -1e12;
202
/// Calculate the cost of reading the first row from an access path, given
/// estimates for init cost, total cost and the number of rows returned.
///
/// @param init_cost   Cost of initializing the path; must be non-negative.
/// @param total_cost  Cost of reading the whole path once; must be at least
///                    init_cost.
/// @param output_rows Estimated number of rows returned; must be
///                    non-negative.
/// @return The estimated cost of producing the first row.
inline double FirstRowCost(double init_cost, double total_cost,
                           double output_rows) {
  assert(init_cost >= 0.0);
  assert(total_cost >= init_cost);
  assert(output_rows >= 0.0);
  if (output_rows <= 1.0) {
    // Zero or one row: producing the first row means doing all the work.
    return total_cost;
  }
  // Otherwise, assume the post-initialization cost is spread evenly
  // across all of the returned rows.
  return init_cost + (total_cost - init_cost) / output_rows;
}
215
216/**
217 Access paths are a query planning structure that correspond 1:1 to iterators,
218 in that an access path contains pretty much exactly the information
219 needed to instantiate given iterator, plus some information that is only
220 needed during planning, such as costs. (The new join optimizer will extend
221 this somewhat in the future. Some iterators also need the query block,
222 ie., JOIN object, they are part of, but that is implicitly available when
223 constructing the tree.)
224
225 AccessPath objects build on a variant, ie., they can hold an access path of
226 any type (table scan, filter, hash join, sort, etc.), although only one at the
227 same time. Currently, they contain 32 bytes of base information that is common
228 to any access path (type identifier, costs, etc.), and then up to 40 bytes
229 that is type-specific (e.g. for a table scan, the TABLE object). It would be
230 nice if we could squeeze it down to 64 and fit a cache line exactly, but it
231 does not seem to be easy without fairly large contortions.
232
233 We could have solved this by inheritance, but the fixed-size design makes it
234 possible to replace an access path when a better one is found, without
235 introducing a new allocation, which will be important when using them as a
236 planning structure.
237 */
239 enum Type : uint8_t {
240 // Basic access paths (those with no children, at least nominally).
241 // NOTE: When adding more paths to this section, also update GetBasicTable()
242 // to handle them.
262
263 // Basic access paths that don't correspond to a specific table.
270
271 // Joins.
276
277 // Composite access paths.
293
294 // Access paths that modify tables.
298
299 /// A general enum to describe the safety of a given operation.
300 /// Currently we only use this to describe row IDs, but it can easily
301 /// be reused for safety of updating a table we're reading from
302 /// (the Halloween problem), or just generally unreproducible results
303 /// (e.g. a TABLESAMPLE changing due to external factors).
304 ///
305 /// Less safe values have higher numerical values.
306 enum Safety : uint8_t {
307 /// The given operation is always safe on this access path.
308 SAFE = 0,
309
310 /// The given operation is safe if this access path is scanned once,
311 /// but not if it's scanned multiple times (e.g. used on the inner side
312 /// of a nested-loop join). A typical example of this is a derived table
313 /// or CTE that is rematerialized on each scan, so that references to
314 /// the old values (such as row IDs) are no longer valid.
316
317 /// The given operation is unsafe on this access path, no matter how many
318 /// or few times it's scanned. Often, it may help to materialize it
319 /// (assuming the materialization itself doesn't use the operation
320 /// in question).
321 UNSAFE = 2
322 };
323
324 /// Whether it is safe to get row IDs (for sorting) from this access path.
326
327 /// Whether this access path counts as one that scans a base table,
328 /// and thus should be counted towards examined_rows. It can sometimes
329 /// seem a bit arbitrary which iterators count towards examined_rows
330 /// and which ones do not, so the only canonical reference is the tests.
331 bool count_examined_rows : 1 {false};
332
333 /// Whether this access path contains a GROUP_INDEX_SKIP_SCAN
334 bool has_group_skip_scan : 1 {false};
335
336#ifndef NDEBUG
337 /// Whether this access path is forced preferred over all others by means
338 /// of a SET DEBUG force_subplan_0x... statement.
339 bool forced_by_dbug : 1 {false};
340#endif
341
342 /// For UPDATE and DELETE statements: The node index of a table which can be
343 /// updated or deleted from immediately as the rows are read from the
344 /// iterator, if this path is only read from once. -1 if there is no such
345 /// table in this path.
346 ///
347 /// Note that this is an index into CostingReceiver's array of nodes, and is
348 /// not necessarily equal to the table number within the query block given by
349 /// Table_ref::tableno().
350 ///
351 /// The table, if any, is currently always the outermost table in the path.
352 ///
353 /// It is possible to have plans where it would be safe to operate
354 /// "immediately" on more than one table. For example, if we do a merge join,
355 /// it is safe to perform immediate deletes on tables on the inner side of the
356 /// join, since both sides are read only once. (However, we currently do not
357 /// support merge joins.)
358 ///
359 /// Another possibility is when the outer table of a nested loop join is
360 /// guaranteed to return at most one row (typically, a unique index lookup
361 /// aka. eq_ref). Then it's safe to delete immediately from both sides of the
 362 /// nested loop join. But we don't do this yet.
363 ///
 364 /// Hash joins read both sides exactly once. However, with hash joins, the
365 /// scans on the inner tables are not positioned on the correct row when the
366 /// result of the join is returned, so the immediate delete logic will need to
367 /// be changed to reposition the underlying scans before doing the immediate
368 /// deletes. While this can be done, it makes the benefit of immediate deletes
369 /// less obvious for these tables, and it can also be a loss in some cases,
370 /// because we lose the deduplication provided by the Unique object used for
371 /// buffered deletes (the immediate deletes could end up spending time
372 /// repositioning to already deleted rows). So we currently don't attempt to
373 /// do immediate deletes from inner tables of hash joins either.
374 ///
375 /// The outer table of a hash join can be deleted from immediately if the
376 /// inner table fits in memory. If the hash join spills to disk, though,
377 /// neither the rows of the outer table nor the rows of the inner table come
378 /// out in the order of the underlying scan, so it is not safe in general to
379 /// perform immediate deletes on the outer table of a hash join.
380 ///
381 /// If support for immediate operations on multiple tables is added,
382 /// this member could be changed from a node index to a NodeMap.
384
385 /// Which ordering the rows produced by this path follow, if any
386 /// (see interesting_orders.h). This is really a LogicalOrderings::StateIndex,
387 /// but we don't want to add a dependency on interesting_orders.h from
388 /// this file, so we use the base type instead of the typedef here.
390
391 /// If an iterator has been instantiated for this access path, points to the
392 /// iterator. Used for constructing iterators that need to talk to each other
393 /// (e.g. for recursive CTEs, or BKA join), and also for locating timing
394 /// information in EXPLAIN ANALYZE queries.
396
397 double cost() const { return m_cost; }
398
399 double init_cost() const { return m_init_cost; }
400
401 /// The cost of reading the first row.
402 double first_row_cost() const {
404 }
405
406 double init_once_cost() const { return m_init_once_cost; }
407
408 double cost_before_filter() const { return m_cost_before_filter; }
409
410 void set_cost(double val) {
411 assert(std::isfinite(val));
412 assert(val >= 0.0 || val == kUnknownCost);
413 m_cost = val;
414 }
415
416 void set_init_cost(double val) {
417 assert(std::isfinite(val));
418 assert(val >= 0.0 || val == kUnknownCost);
419 m_init_cost = val;
420 }
421
422 void set_init_once_cost(double val) {
423 assert(std::isfinite(val));
424 assert(val >= 0.0);
425 m_init_once_cost = val;
426 }
427
428 void set_cost_before_filter(double val) {
429 assert(std::isfinite(val));
430 assert(val >= 0.0 || val == kUnknownCost);
432 }
433
434 /// Return the cost of scanning the given path for the second time
435 /// (or later) in the given query block. This is really the interesting
436 /// metric, not init_once_cost in itself, but since nearly all paths
437 /// have zero init_once_cost, storing that instead allows us to skip
438 /// a lot of repeated path->init_once_cost = path->init_cost calls
439 /// in the code.
440 double rescan_cost() const { return cost() - init_once_cost(); }
441
442 /// Return true if costs and row counts are consistent.
443 bool HasConsistentCostsAndRows(const JoinHypergraph &graph) const;
444
445 /// If no filter, identical to num_output_rows.
447
448 /// Bitmap of WHERE predicates that we are including on this access path,
449 /// referring to the “predicates” array internal to the join optimizer.
450 /// Since bit masks are much cheaper to deal with than creating Item
451 /// objects, and we don't invent new conditions during join optimization
452 /// (all of them are known when we begin optimization), we stick to
453 /// manipulating bit masks during optimization, saying which filters will be
454 /// applied at this node (a 1-bit means the filter will be applied here; if
455 /// there are multiple ones, they are ANDed together).
456 ///
457 /// This is used during join optimization only; before iterators are
458 /// created, we will add FILTER access paths to represent these instead,
459 /// removing the dependency on the array. Said FILTER paths are by
460 /// convention created with materialize_subqueries = false, since the by far
461 /// most common case is that there are no subqueries in the predicate.
462 /// In other words, if you wish to represent a filter with
463 /// materialize_subqueries = true, you will need to make an explicit FILTER
464 /// node.
465 ///
466 /// See also nested_loop_join().equijoin_predicates, which is for filters
467 /// being applied _before_ nested-loop joins, but is otherwise the same idea.
469
470 /// Bitmap of sargable join predicates that have already been applied
471 /// in this access path by means of an index lookup (ref access),
472 /// again referring to “predicates”, and thus should not be counted again
473 /// for selectivity. Note that the filter may need to be applied
474 /// nevertheless (especially in case of type conversions); see
475 /// subsumed_sargable_join_predicates.
476 ///
477 /// Since these refer to the same array as filter_predicates, they will
478 /// never overlap with filter_predicates, and so we can reuse the same
479 /// memory using an alias (a union would not be allowed, since OverflowBitset
480 /// is a class with non-trivial default constructor), even though the meaning
481 /// is entirely separate. If N = num_where_predicates in the hypergraph, then
482 /// bits 0..(N-1) belong to filter_predicates, and the rest to
483 /// applied_sargable_join_predicates.
485 return filter_predicates;
486 }
488 return filter_predicates;
489 }
490
491 /// Bitmap of WHERE predicates that touch tables we have joined in,
492 /// but that we could not apply yet (for instance because they reference
 493 /// other tables, or because we could not push them down into
494 /// the nullable side of outer joins). Used during planning only
495 /// (see filter_predicates).
497
498 /// Similar to applied_sargable_join_predicates, bitmap of sargable
499 /// join predicates that have been applied and will subsume the join
500 /// predicate entirely, ie., not only should the selectivity not be
501 /// double-counted, but the predicate itself is redundant and need not
502 /// be applied as a filter. (It is an error to have a bit set here but not
503 /// in applied_sargable_join_predicates.)
505 return delayed_predicates;
506 }
508 return delayed_predicates;
509 }
510
511 /// If nonzero, a bitmap of other tables whose joined-in rows must already be
512 /// loaded when rows from this access path are evaluated; that is, this
513 /// access path must be put on the inner side of a nested-loop join (or
514 /// multiple such joins) where the outer side includes all of the given
515 /// tables.
516 ///
517 /// The most obvious case for this is dependent tables in LATERAL, but a more
518 /// common case is when we have pushed join conditions referring to those
519 /// tables; e.g., if this access path represents t1 and we have a condition
520 /// t1.x=t2.x that is pushed down into an index lookup (ref access), t2 will
521 /// be set in this bitmap. We can still join in other tables, deferring t2,
522 /// but the bit(s) will then propagate, and we cannot be on the right side of
523 /// a hash join until parameter_tables is zero again. (Also see
524 /// DisallowParameterizedJoinPath() for when we disallow such deferring,
525 /// as an optimization.)
526 ///
527 /// As a special case, we allow setting RAND_TABLE_BIT, even though it
528 /// is normally part of a table_map, not a NodeMap. In this case, it specifies
529 /// that the access path is entirely noncachable, because it depends on
530 /// something nondeterministic or an outer reference, and thus can never be on
531 /// the right side of a hash join, ever.
533
534 /// Auxiliary data used by a secondary storage engine while processing the
535 /// access path during optimization and execution. The secondary storage
536 /// engine is free to store any useful information in this member, for example
537 /// extra statistics or cost estimates. The data pointed to is fully owned by
538 /// the secondary storage engine, and it is the responsibility of the
539 /// secondary engine to manage the memory and make sure it is properly
540 /// destroyed.
541 void *secondary_engine_data{nullptr};
542
543 /// Signature used to uniquely identify the access path.
544 /// 0 meaning non-initialized.
545 size_t signature{0};
546
547 // Accessors for the union below.
548 auto &table_scan() {
549 assert(type == TABLE_SCAN);
550 return u.table_scan;
551 }
552 const auto &table_scan() const {
553 assert(type == TABLE_SCAN);
554 return u.table_scan;
555 }
556 auto &sample_scan() {
557 assert(type == SAMPLE_SCAN);
558 return u.sample_scan;
559 }
560 const auto &sample_scan() const {
561 assert(type == SAMPLE_SCAN);
562 return u.sample_scan;
563 }
564 auto &index_scan() {
565 assert(type == INDEX_SCAN);
566 return u.index_scan;
567 }
568 const auto &index_scan() const {
569 assert(type == INDEX_SCAN);
570 return u.index_scan;
571 }
573 assert(type == INDEX_DISTANCE_SCAN);
574 return u.index_distance_scan;
575 }
576 const auto &index_distance_scan() const {
577 assert(type == INDEX_DISTANCE_SCAN);
578 return u.index_distance_scan;
579 }
580 auto &ref() {
581 assert(type == REF);
582 return u.ref;
583 }
584 const auto &ref() const {
585 assert(type == REF);
586 return u.ref;
587 }
588 auto &ref_or_null() {
589 assert(type == REF_OR_NULL);
590 return u.ref_or_null;
591 }
592 const auto &ref_or_null() const {
593 assert(type == REF_OR_NULL);
594 return u.ref_or_null;
595 }
596 auto &eq_ref() {
597 assert(type == EQ_REF);
598 return u.eq_ref;
599 }
600 const auto &eq_ref() const {
601 assert(type == EQ_REF);
602 return u.eq_ref;
603 }
605 assert(type == PUSHED_JOIN_REF);
606 return u.pushed_join_ref;
607 }
608 const auto &pushed_join_ref() const {
609 assert(type == PUSHED_JOIN_REF);
610 return u.pushed_join_ref;
611 }
613 assert(type == FULL_TEXT_SEARCH);
614 return u.full_text_search;
615 }
616 const auto &full_text_search() const {
617 assert(type == FULL_TEXT_SEARCH);
618 return u.full_text_search;
619 }
620 auto &const_table() {
621 assert(type == CONST_TABLE);
622 return u.const_table;
623 }
624 const auto &const_table() const {
625 assert(type == CONST_TABLE);
626 return u.const_table;
627 }
628 auto &mrr() {
629 assert(type == MRR);
630 return u.mrr;
631 }
632 const auto &mrr() const {
633 assert(type == MRR);
634 return u.mrr;
635 }
636 auto &follow_tail() {
637 assert(type == FOLLOW_TAIL);
638 return u.follow_tail;
639 }
640 const auto &follow_tail() const {
641 assert(type == FOLLOW_TAIL);
642 return u.follow_tail;
643 }
645 assert(type == INDEX_RANGE_SCAN);
646 return u.index_range_scan;
647 }
648 const auto &index_range_scan() const {
649 assert(type == INDEX_RANGE_SCAN);
650 return u.index_range_scan;
651 }
652 auto &index_merge() {
653 assert(type == INDEX_MERGE);
654 return u.index_merge;
655 }
656 const auto &index_merge() const {
657 assert(type == INDEX_MERGE);
658 return u.index_merge;
659 }
661 assert(type == ROWID_INTERSECTION);
662 return u.rowid_intersection;
663 }
664 const auto &rowid_intersection() const {
665 assert(type == ROWID_INTERSECTION);
666 return u.rowid_intersection;
667 }
668 auto &rowid_union() {
669 assert(type == ROWID_UNION);
670 return u.rowid_union;
671 }
672 const auto &rowid_union() const {
673 assert(type == ROWID_UNION);
674 return u.rowid_union;
675 }
677 assert(type == INDEX_SKIP_SCAN);
678 return u.index_skip_scan;
679 }
680 const auto &index_skip_scan() const {
681 assert(type == INDEX_SKIP_SCAN);
682 return u.index_skip_scan;
683 }
685 assert(type == GROUP_INDEX_SKIP_SCAN);
686 return u.group_index_skip_scan;
687 }
688 const auto &group_index_skip_scan() const {
689 assert(type == GROUP_INDEX_SKIP_SCAN);
690 return u.group_index_skip_scan;
691 }
694 return u.dynamic_index_range_scan;
695 }
696 const auto &dynamic_index_range_scan() const {
698 return u.dynamic_index_range_scan;
699 }
702 return u.materialized_table_function;
703 }
704 const auto &materialized_table_function() const {
706 return u.materialized_table_function;
707 }
709 assert(type == UNQUALIFIED_COUNT);
710 return u.unqualified_count;
711 }
712 const auto &unqualified_count() const {
713 assert(type == UNQUALIFIED_COUNT);
714 return u.unqualified_count;
715 }
717 assert(type == TABLE_VALUE_CONSTRUCTOR);
718 return u.table_value_constructor;
719 }
720 const auto &table_value_constructor() const {
721 assert(type == TABLE_VALUE_CONSTRUCTOR);
722 return u.table_value_constructor;
723 }
725 assert(type == FAKE_SINGLE_ROW);
726 return u.fake_single_row;
727 }
728 const auto &fake_single_row() const {
729 assert(type == FAKE_SINGLE_ROW);
730 return u.fake_single_row;
731 }
732 auto &zero_rows() {
733 assert(type == ZERO_ROWS);
734 return u.zero_rows;
735 }
736 const auto &zero_rows() const {
737 assert(type == ZERO_ROWS);
738 return u.zero_rows;
739 }
741 assert(type == ZERO_ROWS_AGGREGATED);
742 return u.zero_rows_aggregated;
743 }
744 const auto &zero_rows_aggregated() const {
745 assert(type == ZERO_ROWS_AGGREGATED);
746 return u.zero_rows_aggregated;
747 }
748 auto &hash_join() {
749 assert(type == HASH_JOIN);
750 return u.hash_join;
751 }
752 const auto &hash_join() const {
753 assert(type == HASH_JOIN);
754 return u.hash_join;
755 }
756 auto &bka_join() {
757 assert(type == BKA_JOIN);
758 return u.bka_join;
759 }
760 const auto &bka_join() const {
761 assert(type == BKA_JOIN);
762 return u.bka_join;
763 }
765 assert(type == NESTED_LOOP_JOIN);
766 return u.nested_loop_join;
767 }
768 const auto &nested_loop_join() const {
769 assert(type == NESTED_LOOP_JOIN);
770 return u.nested_loop_join;
771 }
774 return u.nested_loop_semijoin_with_duplicate_removal;
775 }
778 return u.nested_loop_semijoin_with_duplicate_removal;
779 }
780 auto &filter() {
781 assert(type == FILTER);
782 return u.filter;
783 }
784 const auto &filter() const {
785 assert(type == FILTER);
786 return u.filter;
787 }
788 auto &sort() {
789 assert(type == SORT);
790 return u.sort;
791 }
792 const auto &sort() const {
793 assert(type == SORT);
794 return u.sort;
795 }
796 auto &aggregate() {
797 assert(type == AGGREGATE);
798 return u.aggregate;
799 }
800 const auto &aggregate() const {
801 assert(type == AGGREGATE);
802 return u.aggregate;
803 }
805 assert(type == TEMPTABLE_AGGREGATE);
806 return u.temptable_aggregate;
807 }
808 const auto &temptable_aggregate() const {
809 assert(type == TEMPTABLE_AGGREGATE);
810 return u.temptable_aggregate;
811 }
812 auto &limit_offset() {
813 assert(type == LIMIT_OFFSET);
814 return u.limit_offset;
815 }
816 const auto &limit_offset() const {
817 assert(type == LIMIT_OFFSET);
818 return u.limit_offset;
819 }
820 auto &stream() {
821 assert(type == STREAM);
822 return u.stream;
823 }
824 const auto &stream() const {
825 assert(type == STREAM);
826 return u.stream;
827 }
828 auto &materialize() {
829 assert(type == MATERIALIZE);
830 return u.materialize;
831 }
832 const auto &materialize() const {
833 assert(type == MATERIALIZE);
834 return u.materialize;
835 }
838 return u.materialize_information_schema_table;
839 }
842 return u.materialize_information_schema_table;
843 }
844 auto &append() {
845 assert(type == APPEND);
846 return u.append;
847 }
848 const auto &append() const {
849 assert(type == APPEND);
850 return u.append;
851 }
852 auto &window() {
853 assert(type == WINDOW);
854 return u.window;
855 }
856 const auto &window() const {
857 assert(type == WINDOW);
858 return u.window;
859 }
860 auto &weedout() {
861 assert(type == WEEDOUT);
862 return u.weedout;
863 }
864 const auto &weedout() const {
865 assert(type == WEEDOUT);
866 return u.weedout;
867 }
869 assert(type == REMOVE_DUPLICATES);
870 return u.remove_duplicates;
871 }
872 const auto &remove_duplicates() const {
873 assert(type == REMOVE_DUPLICATES);
874 return u.remove_duplicates;
875 }
878 return u.remove_duplicates_on_index;
879 }
880 const auto &remove_duplicates_on_index() const {
882 return u.remove_duplicates_on_index;
883 }
884 auto &alternative() {
885 assert(type == ALTERNATIVE);
886 return u.alternative;
887 }
888 const auto &alternative() const {
889 assert(type == ALTERNATIVE);
890 return u.alternative;
891 }
893 assert(type == CACHE_INVALIDATOR);
894 return u.cache_invalidator;
895 }
896 const auto &cache_invalidator() const {
897 assert(type == CACHE_INVALIDATOR);
898 return u.cache_invalidator;
899 }
900 auto &delete_rows() {
901 assert(type == DELETE_ROWS);
902 return u.delete_rows;
903 }
904 const auto &delete_rows() const {
905 assert(type == DELETE_ROWS);
906 return u.delete_rows;
907 }
908 auto &update_rows() {
909 assert(type == UPDATE_ROWS);
910 return u.update_rows;
911 }
912 const auto &update_rows() const {
913 assert(type == UPDATE_ROWS);
914 return u.update_rows;
915 }
916
917 double num_output_rows() const { return m_num_output_rows; }
918
919 void set_num_output_rows(double val) {
920 assert(std::isfinite(val));
921 assert(val == kUnknownRowCount || val >= 0.0);
922 m_num_output_rows = val;
923 }
924
925 private:
926 /// Expected number of output rows.
928
929 /// Expected cost to read all of this access path once.
931
932 /// Expected cost to initialize this access path; ie., cost to read
933 /// k out of N rows would be init_cost + (k/N) * (cost - init_cost).
934 /// Note that EXPLAIN prints out cost of reading the _first_ row
935 /// because it is easier for the user and also easier to measure in
936 /// EXPLAIN ANALYZE, but it is easier to do calculations with a pure
937 /// initialization cost, so that is what we use in this member.
938 /// kUnknownCost for unknown.
940
941 /// Of init_cost, how much of the initialization needs only to be done
942 /// once per query block. (This is a cost, not a proportion.)
943 /// Ie., if the access path can reuse some its initialization work
944 /// if Init() is called multiple times, this member will be nonzero.
945 /// A typical example is a materialized table with rematerialize=false;
946 /// the second time Init() is called, it's a no-op. Most paths will have
947 /// init_once_cost = 0.0, ie., repeated scans will cost the same.
948 /// We do not intend to use this field to model cache effects.
949 ///
950 /// This is currently not printed in EXPLAIN, only optimizer trace.
951 double m_init_once_cost{0.0};
952
953 /// If no filter, identical to cost. init_cost is always the same
954 /// (filters have zero initialization cost).
956
957 // We'd prefer if this could be an std::variant, but we don't have C++17 yet.
958 // It is private to force all access to be through the type-checking
959 // accessors.
960 //
961 // For information about the meaning of each value, see the corresponding
962 // row iterator constructors.
963 union {
964 struct {
967 struct {
968 TABLE *table;
972 struct {
973 TABLE *table;
974 int idx;
978 struct {
979 TABLE *table;
980 int idx;
982 bool reverse;
984 struct {
985 TABLE *table;
987 bool use_order;
988 bool reverse;
990 struct {
991 TABLE *table;
993 bool use_order;
995 struct {
996 TABLE *table;
999 struct {
1000 TABLE *table;
1002 bool use_order;
1005 struct {
1006 TABLE *table;
1008 bool use_order;
1012 struct {
1013 TABLE *table;
1016 struct {
1017 TABLE *table;
1023 struct {
1024 TABLE *table;
1026 struct {
1027 // The key part(s) we are scanning on. Note that this may be an array.
1028 // You can get the table we are working on by looking into
1029 // used_key_parts[0].field->table (it is not stored directly, to avoid
1030 // going over the AccessPath size limits).
1032
1033 // The actual ranges we are scanning over (originally derived from “key”).
1034 // Not a Bounds_checked_array, to save 4 bytes on the length.
1036 unsigned num_ranges;
1037
1038 unsigned mrr_flags;
1040
1041 // Which index (in the TABLE) we are scanning over, and how many of its
1042 // key parts we are using.
1043 unsigned index;
1045
1046 // If true, the scan can return rows in rowid order.
1048
1049 // If true, the scan _should_ return rows in rowid order.
1050 // Should only be set if can_be_used_for_ror == true.
1052
1053 // If true, this plan can be used for index merge scan.
1055
1056 // See row intersection for more details.
1058
1059 // Whether we are scanning over a geometry key part.
1060 bool geometry : 1;
1061
1062 // Whether we need a reverse scan. Only supported if geometry == false.
1063 bool reverse : 1;
1064
1065 // For a reverse scan, if we are using extended key parts. It is needed,
1066 // to set correct flags when retrieving records.
1069 struct {
1070 TABLE *table;
1075 struct {
1076 TABLE *table;
1078
1079 // Clustered primary key scan, if any.
1081
1082 bool forced_by_hint;
1085
1086 // If true, the first child scan should reuse table->file instead of
1087 // creating its own. This is true if the intersection is the topmost
1088 // range scan, but _not_ if it's below a union. (The reasons for this
1089 // are unknown.) It can also be negated by logic involving
1090 // retrieve_full_rows and is_covering, again for unknown reasons.
1091 //
1092 // This is not only for performance; multi-table delete has a hidden
1093 // dependency on this behavior when running against certain types of
1094 // tables (e.g. MyISAM), as it assumes table->file is correctly positioned
1095 // when deleting (and not all table types can transfer the position of one
1096 // handler to another by using position()).
1097 bool reuse_handler;
1098
1099 // true if no row retrieval phase is necessary.
1102 struct {
1103 TABLE *table;
1105 bool forced_by_hint;
1107 struct {
1108 TABLE *table;
1109 unsigned index;
1110 unsigned num_used_key_parts;
1111 bool forced_by_hint;
1112
1113 // Large, and has nontrivial destructors, so split out into
1114 // its own allocation.
1117 struct {
1118 TABLE *table;
1119 unsigned index;
1120 unsigned num_used_key_parts;
1121 bool forced_by_hint;
1122
1123 // Large, so split out into its own allocation.
1126 struct {
1127 TABLE *table;
1128 QEP_TAB *qep_tab; // Used only for buffering.
1130 struct {
1131 TABLE *table;
1135 struct {
1137
1138 struct {
1141 struct {
1142 // No members.
1144 struct {
1145 // The child is optional. It is only used for keeping track of which
1146 // tables are pruned away by this path, and it is only needed when this
1147 // path is on the inner side of an outer join. See ZeroRowsIterator for
1148 // details. The child of a ZERO_ROWS access path will not be visited by
1149 // WalkAccessPaths(). It will be visited by WalkTablesUnderAccessPath()
1150 // only if called with include_pruned_tables = true. No iterator is
1151 // created for the child, and the child is not shown by EXPLAIN.
1153 // Used for EXPLAIN only.
1154 // TODO(sgunders): make an enum.
1155 const char *cause;
1157 struct {
1158 // Used for EXPLAIN only.
1159 // TODO(sgunders): make an enum.
1160 const char *cause;
1162
1163 struct {
1167 bool store_rowids; // Whether we are below a weedout or not.
1171 struct {
1176 bool store_rowids; // Whether we are below a weedout or not.
1179 struct {
1181 JoinType join_type; // Somewhat redundant wrt. join_predicate.
1185
1186 // Equijoin filters to apply before the join, if any.
1187 // Indexes into join_predicate->expr->equijoin_conditions.
1188 // Non-equijoin conditions are always applied.
1189 // If already_expanded_predicates is true, do not re-expand.
1191
1192 // NOTE: Due to the nontrivial constructor on equijoin_predicates,
1193 // this struct needs an initializer, or the union would not be
1194 // default-constructible. If we need more than one union member
1195 // with such an initializer, we would probably need to change
1196 // equijoin_predicates into a uint64_t type-punned to an OverflowBitset.
1197 } nested_loop_join = {nullptr, nullptr, JoinType::INNER, false, false,
1198 nullptr, {}};
1199 struct {
1201 const TABLE *table;
1203 size_t key_len;
1205
1206 struct {
1209
1210 // This parameter, unlike nearly all others, is not passed to the the
1211 // actual iterator. Instead, if true, it signifies that when creating
1212 // the iterator, all materializable subqueries in “condition” should be
1213 // materialized (with any in2exists condition removed first). In the
1214 // very rare case that there are two or more such subqueries, this is
1215 // an all-or-nothing decision, for simplicity.
1216 //
1217 // See FinalizeMaterializedSubqueries().
1220 struct {
1224
1225 // If filesort is nullptr: A new filesort will be created at the
1226 // end of optimization, using this order and flags. Otherwise: Only
1227 // used by EXPLAIN.
1234 struct {
1238 struct {
1242 TABLE *table;
1246 struct {
1248 ha_rows limit;
1252 // Only used when the LIMIT is on a UNION with SQL_CALC_FOUND_ROWS.
1253 // See Query_expression::send_records.
1256 struct {
1258 JOIN *join;
1260 TABLE *table;
1262 int ref_slice;
1264 struct {
1265 // NOTE: The only legal access paths within table_path are
1266 // TABLE_SCAN, REF, REF_OR_NULL, EQ_REF, ALTERNATIVE,
1267 // CONST_TABLE (somewhat nonsensical), INDEX_SCAN and DYNAMIC_INDEX_SCAN
1269
1270 // Large, and has nontrivial destructors, so split out
1271 // into its own allocation.
1273 /** The total cost of executing the queries that we materialize.*/
1275 /// The number of materialized rows (as opposed to the number of rows
1276 /// fetched by table_path). Needed for 'explain'.
1279 struct {
1282 Item *condition;
1284 struct {
1287 struct {
1292 int ref_slice;
1295 struct {
1300 struct {
1301 using ItemSpan = std::span<Item *>;
1303
1304 /// @cond IGNORE
1305 // These functions somehow triggers a doxygen warning. (Presumably
1306 // a doxygen bug.)
1307 ItemSpan &group_items() {
1308 return reinterpret_cast<ItemSpan &>(m_group_items);
1309 }
1310
1311 const ItemSpan &group_items() const {
1312 return reinterpret_cast<const ItemSpan &>(m_group_items);
1313 }
1314 /// @endcond
1315
1316 private:
1317 // gcc 11 does not support a span as a union member. Replace this
1318 // with "std::span<Item *> group_items" when we move to newer gcc version.
1319 alignas(alignof(ItemSpan)) std::byte m_group_items[sizeof(ItemSpan)];
1321 struct {
1323 TABLE *table;
1324 KEY *key;
1327 struct {
1329
1330 // For the ref.
1334 struct {
1336 const char *name;
1338 struct {
1343 struct {
1348 } u;
1349};
1351 "AccessPath must be trivially destructible, as it is allocated "
1352 "on the MEM_ROOT and not wrapped in unique_ptr_destroy_only"
1353 "(because multiple candidates during planning could point to "
1354 "the same access paths, and refcounting would be expensive)");
1355static_assert(sizeof(AccessPath) <= 152,
1356 "We are creating a lot of access paths in the join "
1357 "optimizer, so be sure not to bloat it without noticing. "
1358 "(104 bytes for the base, 52 bytes for the variant.)");
1359
1360inline void CopyBasicProperties(const AccessPath &from, AccessPath *to) {
1362 to->set_cost(from.cost());
1363 to->set_init_cost(from.init_cost());
1366 to->safe_for_rowid = from.safe_for_rowid;
1367 to->ordering_state = from.ordering_state;
1369 to->signature = from.signature;
1370}
1371
1372/// Return the name of an AccessPath::Type enumerator.
1373std::string_view AccessPathTypeName(AccessPath::Type type);
1374
1375// Trivial factory functions for all of the types of access paths above.
1376
1378 bool count_examined_rows) {
1379 AccessPath *path = new (thd->mem_root) AccessPath;
1381 path->count_examined_rows = count_examined_rows;
1382 path->table_scan().table = table;
1383 return path;
1384}
1385
1387 double sampling_percentage,
1388 bool count_examined_rows) {
1389 AccessPath *path = new (thd->mem_root) AccessPath;
1391 path->count_examined_rows = count_examined_rows;
1392 path->sample_scan().table = table;
1393 path->sample_scan().sampling_percentage = sampling_percentage;
1394 return path;
1395}
1396
1398 bool use_order, bool reverse,
1399 bool count_examined_rows) {
1400 AccessPath *path = new (thd->mem_root) AccessPath;
1402 path->count_examined_rows = count_examined_rows;
1403 path->index_scan().table = table;
1404 path->index_scan().idx = idx;
1405 path->index_scan().use_order = use_order;
1406 path->index_scan().reverse = reverse;
1407 return path;
1408}
1409
1411 bool use_order, bool reverse,
1412 bool count_examined_rows) {
1413 AccessPath *path = new (thd->mem_root) AccessPath;
1414 path->type = AccessPath::REF;
1415 path->count_examined_rows = count_examined_rows;
1416 path->ref().table = table;
1417 path->ref().ref = ref;
1418 path->ref().use_order = use_order;
1419 path->ref().reverse = reverse;
1420 return path;
1421}
1422
1424 Index_lookup *ref, bool use_order,
1425 bool count_examined_rows) {
1426 AccessPath *path = new (thd->mem_root) AccessPath;
1428 path->count_examined_rows = count_examined_rows;
1429 path->ref_or_null().table = table;
1430 path->ref_or_null().ref = ref;
1431 path->ref_or_null().use_order = use_order;
1432 return path;
1433}
1434
1436 bool count_examined_rows) {
1437 AccessPath *path = new (thd->mem_root) AccessPath;
1438 path->type = AccessPath::EQ_REF;
1439 path->count_examined_rows = count_examined_rows;
1440 path->eq_ref().table = table;
1441 path->eq_ref().ref = ref;
1442 return path;
1443}
1444
1446 Index_lookup *ref, bool use_order,
1447 bool is_unique,
1448 bool count_examined_rows) {
1449 AccessPath *path = new (thd->mem_root) AccessPath;
1451 path->count_examined_rows = count_examined_rows;
1452 path->pushed_join_ref().table = table;
1453 path->pushed_join_ref().ref = ref;
1454 path->pushed_join_ref().use_order = use_order;
1455 path->pushed_join_ref().is_unique = is_unique;
1456 return path;
1457}
1458
1461 Item_func_match *ft_func,
1462 bool use_order, bool use_limit,
1463 bool count_examined_rows) {
1464 AccessPath *path = new (thd->mem_root) AccessPath;
1466 path->count_examined_rows = count_examined_rows;
1467 path->full_text_search().table = table;
1468 path->full_text_search().ref = ref;
1469 path->full_text_search().use_order = use_order;
1470 path->full_text_search().use_limit = use_limit;
1471 path->full_text_search().ft_func = ft_func;
1472 return path;
1473}
1474
1477 bool count_examined_rows) {
1478 AccessPath *path = new (thd->mem_root) AccessPath;
1480 path->count_examined_rows = count_examined_rows;
1481 path->set_num_output_rows(1.0);
1482 path->set_cost(0.0);
1483 path->set_init_cost(0.0);
1484 path->set_init_once_cost(0.0);
1485 path->const_table().table = table;
1486 path->const_table().ref = ref;
1487 return path;
1488}
1489
1491 int mrr_flags) {
1492 AccessPath *path = new (thd->mem_root) AccessPath;
1493 path->type = AccessPath::MRR;
1494 path->mrr().table = table;
1495 path->mrr().ref = ref;
1496 path->mrr().mrr_flags = mrr_flags;
1497
1498 // This will be filled in when the BKA iterator is created.
1499 path->mrr().bka_path = nullptr;
1500
1501 return path;
1502}
1503
1505 bool count_examined_rows) {
1506 AccessPath *path = new (thd->mem_root) AccessPath;
1508 path->count_examined_rows = count_examined_rows;
1509 path->follow_tail().table = table;
1510 return path;
1511}
1512
1514 THD *thd, TABLE *table, QEP_TAB *qep_tab, bool count_examined_rows) {
1515 AccessPath *path = new (thd->mem_root) AccessPath;
1517 path->count_examined_rows = count_examined_rows;
1518 path->dynamic_index_range_scan().table = table;
1519 path->dynamic_index_range_scan().qep_tab = qep_tab;
1520 return path;
1521}
1522
1524 THD *thd, TABLE *table, Table_function *table_function,
1525 AccessPath *table_path) {
1526 AccessPath *path = new (thd->mem_root) AccessPath;
1528 path->materialized_table_function().table = table;
1529 path->materialized_table_function().table_function = table_function;
1530 path->materialized_table_function().table_path = table_path;
1531 return path;
1532}
1533
1535 AccessPath *path = new (thd->mem_root) AccessPath;
1537 return path;
1538}
1539
1541 const JOIN *join);
1542
1544 THD *thd, AccessPath *outer, AccessPath *inner, const TABLE *table,
1545 KEY *key, size_t key_len) {
1546 AccessPath *path = new (thd->mem_root) AccessPath;
1548 path->nested_loop_semijoin_with_duplicate_removal().outer = outer;
1549 path->nested_loop_semijoin_with_duplicate_removal().inner = inner;
1550 path->nested_loop_semijoin_with_duplicate_removal().table = table;
1551 path->nested_loop_semijoin_with_duplicate_removal().key = key;
1552 path->nested_loop_semijoin_with_duplicate_removal().key_len = key_len;
1553 path->has_group_skip_scan =
1555 return path;
1556}
1557
1559 Item *condition) {
1560 AccessPath *path = new (thd->mem_root) AccessPath;
1561 path->type = AccessPath::FILTER;
1562 path->filter().child = child;
1563 path->filter().condition = condition;
1564 path->filter().materialize_subqueries = false;
1565 path->has_group_skip_scan = child->has_group_skip_scan;
1566 return path;
1567}
1568
1569// Not inline, because it needs access to filesort internals
1570// (which are forward-declared in this file).
1572 ORDER *order, bool count_examined_rows);
1573
1575 olap_type olap) {
1576 AccessPath *path = new (thd->mem_root) AccessPath;
1578 path->aggregate().child = child;
1579 path->aggregate().olap = olap;
1580 path->has_group_skip_scan = child->has_group_skip_scan;
1581 return path;
1582}
1583
1585 THD *thd, AccessPath *subquery_path, JOIN *join,
1586 Temp_table_param *temp_table_param, TABLE *table, AccessPath *table_path,
1587 int ref_slice) {
1588 AccessPath *path = new (thd->mem_root) AccessPath;
1590 path->temptable_aggregate().subquery_path = subquery_path;
1591 path->temptable_aggregate().join = join;
1592 path->temptable_aggregate().temp_table_param = temp_table_param;
1593 path->temptable_aggregate().table = table;
1594 path->temptable_aggregate().table_path = table_path;
1595 path->temptable_aggregate().ref_slice = ref_slice;
1596 return path;
1597}
1598
1600 ha_rows limit, ha_rows offset,
1601 bool count_all_rows,
1602 bool reject_multiple_rows,
1603 ha_rows *send_records_override) {
1605 AccessPath *path = new (thd->mem_root) AccessPath;
1607 path->immediate_update_delete_table = child->immediate_update_delete_table;
1608 path->limit_offset().child = child;
1609 path->limit_offset().limit = limit;
1610 path->limit_offset().offset = offset;
1611 path->limit_offset().count_all_rows = count_all_rows;
1612 path->limit_offset().reject_multiple_rows = reject_multiple_rows;
1613 path->limit_offset().send_records_override = send_records_override;
1614 CopyBasicProperties(*child, path);
1616 return path;
1617}
1618
1620 bool count_examined_rows) {
1621 AccessPath *path = new (thd->mem_root) AccessPath;
1623 path->count_examined_rows = count_examined_rows;
1624 path->set_num_output_rows(1.0);
1625 path->set_cost(0.0);
1626 path->set_init_cost(0.0);
1627 path->set_init_once_cost(0.0);
1628 return path;
1629}
1630
1632 const char *cause) {
1633 AccessPath *path = new (thd->mem_root) AccessPath;
1635 path->zero_rows().child = child;
1636 path->zero_rows().cause = cause;
1637 path->set_num_output_rows(0.0);
1638 path->set_cost(0.0);
1639 path->set_init_cost(0.0);
1640 path->set_init_once_cost(0.0);
1641 path->num_output_rows_before_filter = 0.0;
1642 path->set_cost_before_filter(0.0);
1643 return path;
1644}
1645
1646inline AccessPath *NewZeroRowsAccessPath(THD *thd, const char *cause) {
1647 return NewZeroRowsAccessPath(thd, /*child=*/nullptr, cause);
1648}
1649
1651 const char *cause) {
1652 AccessPath *path = new (thd->mem_root) AccessPath;
1654 path->zero_rows_aggregated().cause = cause;
1655 path->set_num_output_rows(1.0);
1656 path->set_cost(0.0);
1657 path->set_init_cost(0.0);
1658 return path;
1659}
1660
1662 JOIN *join,
1663 Temp_table_param *temp_table_param,
1664 TABLE *table, int ref_slice) {
1665 AccessPath *path = new (thd->mem_root) AccessPath;
1666 path->type = AccessPath::STREAM;
1667 path->stream().child = child;
1668 path->stream().join = join;
1669 path->stream().temp_table_param = temp_table_param;
1670 path->stream().table = table;
1671 path->stream().ref_slice = ref_slice;
1672 // Will be set later if we get a weedout access path as parent.
1673 path->stream().provide_rowid = false;
1674 path->has_group_skip_scan = child->has_group_skip_scan;
1675 return path;
1676}
1677
1680 JOIN *join, bool copy_items,
1681 Temp_table_param *temp_table_param) {
1682 assert(path != nullptr);
1684 MaterializePathParameters::Operand &operand = array[0];
1685 operand.subquery_path = path;
1686 operand.select_number = select_number;
1687 operand.join = join;
1689 operand.copy_items = copy_items;
1690 operand.temp_table_param = temp_table_param;
1691 return array;
1692}
1693
1697 AccessPath *table_path, Common_table_expr *cte, Query_expression *unit,
1698 int ref_slice, bool rematerialize, ha_rows limit_rows,
1699 bool reject_multiple_rows,
1704 param->m_operands = std::move(operands);
1705 if (rematerialize) {
1706 // There's no point in adding invalidators if we're rematerializing
1707 // every time anyway.
1708 param->invalidators = nullptr;
1709 } else {
1710 param->invalidators = invalidators;
1711 }
1712 param->table = table;
1713 param->cte = cte;
1714 param->unit = unit;
1715 param->ref_slice = ref_slice;
1716 param->rematerialize = rematerialize;
1717 param->limit_rows = (table == nullptr || table->is_union_or_table()
1718 ? limit_rows
1719 :
1720 // INTERSECT, EXCEPT: Enforced by TableScanIterator,
1721 // see its constructor
1722 HA_POS_ERROR);
1723 param->reject_multiple_rows = reject_multiple_rows;
1724 param->deduplication_reason = dedup_reason;
1725
1726#ifndef NDEBUG
1727 for (MaterializePathParameters::Operand &operand : param->m_operands) {
1728 assert(operand.subquery_path != nullptr);
1729 }
1730#endif
1731
1732 AccessPath *path = new (thd->mem_root) AccessPath;
1734 path->materialize().table_path = table_path;
1735 path->materialize().param = param;
1736 path->materialize().subquery_cost = kUnknownCost;
1737 path->materialize().subquery_rows = kUnknownRowCount;
1738 if (rematerialize) {
1739 path->safe_for_rowid = AccessPath::SAFE_IF_SCANNED_ONCE;
1740 } else {
1741 // The default; this is just to be explicit in the code.
1742 path->safe_for_rowid = AccessPath::SAFE;
1743 }
1744 return path;
1745}
1746
1748 THD *thd, AccessPath *table_path, Table_ref *table_list, Item *condition) {
1749 AccessPath *path = new (thd->mem_root) AccessPath;
1751 path->materialize_information_schema_table().table_path = table_path;
1752 path->materialize_information_schema_table().table_list = table_list;
1753 path->materialize_information_schema_table().condition = condition;
1754 return path;
1755}
1756
1757/// Add path costs c1 and c2, but handle kUnknownCost correctly.
1758inline double AddCost(double c1, double c2) {
1759 // If one is undefined, use the other, as we have nothing else.
1760 if (c1 == kUnknownCost) {
1761 return c2;
1762 } else if (c2 == kUnknownCost) {
1763 return c1;
1764 } else {
1765 return c1 + c2;
1766 }
1767}
1768
1769/// Add row counts c1 and c2, but handle kUnknownRowCount correctly.
1770inline double AddRowCount(double c1, double c2) {
1771 // If one is undefined, use the other, as we have nothing else.
1772 if (c1 == kUnknownRowCount) {
1773 return c2;
1774 } else if (c2 == kUnknownRowCount) {
1775 return c1;
1776 } else {
1777 return c1 + c2;
1778 }
1779}
1780
1781// The Mem_root_array must be allocated on a MEM_ROOT that lives at least for as
1782// long as the access path.
1784 THD *thd, Mem_root_array<AppendPathParameters> *children) {
1785 AccessPath *path = new (thd->mem_root) AccessPath;
1786 path->type = AccessPath::APPEND;
1787 path->append().children = children;
1788 double num_output_rows = kUnknownRowCount;
1789 for (const AppendPathParameters &child : *children) {
1790 path->set_cost(AddCost(path->cost(), child.path->cost()));
1791 path->set_init_cost(AddCost(path->init_cost(), child.path->init_cost()));
1792 path->set_init_once_cost(path->init_once_cost() +
1793 child.path->init_once_cost());
1794 num_output_rows =
1795 AddRowCount(num_output_rows, child.path->num_output_rows());
1796 }
1797 path->set_num_output_rows(num_output_rows);
1798 return path;
1799}
1800
1802 Window *window,
1803 Temp_table_param *temp_table_param,
1804 int ref_slice, bool needs_buffering) {
1805 AccessPath *path = new (thd->mem_root) AccessPath;
1806 path->type = AccessPath::WINDOW;
1807 path->window().child = child;
1808 path->window().window = window;
1809 path->window().temp_table = nullptr;
1810 path->window().temp_table_param = temp_table_param;
1811 path->window().ref_slice = ref_slice;
1812 path->window().needs_buffering = needs_buffering;
1813 path->set_num_output_rows(child->num_output_rows());
1814 return path;
1815}
1816
1818 SJ_TMP_TABLE *weedout_table) {
1819 AccessPath *path = new (thd->mem_root) AccessPath;
1820 path->type = AccessPath::WEEDOUT;
1821 path->weedout().child = child;
1822 path->weedout().weedout_table = weedout_table;
1823 path->weedout().tables_to_get_rowid_for =
1824 0; // Must be handled by the caller.
1825 return path;
1826}
1827
1829 THD *thd, AccessPath *child, std::span<Item *> group_items) {
1830 AccessPath *path = new (thd->mem_root) AccessPath;
1832 path->remove_duplicates().child = child;
1833 path->remove_duplicates().group_items() = group_items;
1834 path->has_group_skip_scan = child->has_group_skip_scan;
1835 return path;
1836}
1837
1839 THD *thd, AccessPath *child, TABLE *table, KEY *key,
1840 unsigned loosescan_key_len) {
1841 AccessPath *path = new (thd->mem_root) AccessPath;
1843 path->remove_duplicates_on_index().child = child;
1844 path->remove_duplicates_on_index().table = table;
1845 path->remove_duplicates_on_index().key = key;
1846 path->remove_duplicates_on_index().loosescan_key_len = loosescan_key_len;
1847 path->has_group_skip_scan = child->has_group_skip_scan;
1848 return path;
1849}
1850
1852 AccessPath *table_scan_path,
1853 Index_lookup *used_ref) {
1854 AccessPath *path = new (thd->mem_root) AccessPath;
1856 path->alternative().table_scan_path = table_scan_path;
1857 path->alternative().child = child;
1858 path->alternative().used_ref = used_ref;
1859 return path;
1860}
1861
1863 const char *name) {
1864 AccessPath *path = new (thd->mem_root) AccessPath;
1866 path->cache_invalidator().child = child;
1867 path->cache_invalidator().name = name;
1868 return path;
1869}
1870
1873 table_map immediate_tables);
1874
1876 table_map update_tables,
1877 table_map immediate_tables);
1878
1879/**
1880 Modifies "path" and the paths below it so that they provide row IDs for
1881 all tables.
1882
1883 This also figures out how the row IDs should be retrieved for each table in
1884 the input to the path. If the handler of the table is positioned on the
1885 correct row while reading the input, handler::position() can be called to get
1886 the row ID from the handler. However, if the input iterator returns rows
1887 without keeping the position of the underlying handlers in sync, calling
1888 handler::position() will not be able to provide the row IDs. Specifically,
1889 hash join and BKA join do not keep the underlying handlers positioned on the
1890 right row. Therefore, this function will instruct every hash join or BKA join
1891 below "path" to maintain row IDs in the join buffer, and updating handler::ref
1892 in every input table for each row they return. Then "path" does not need to
1893 call handler::position() to get it (nor should it, since calling it would
1894 overwrite the correct row ID with a stale one).
1895
1896 The tables on which "path" should call handler::position() are stored in a
1897 `tables_to_get_rowid_for` bitset in "path". For all the other tables, it can
1898 assume that handler::ref already contains the correct row ID.
1899 */
1901
1904 bool eligible_for_batch_mode);
1905
1906// A short form of CreateIteratorFromAccessPath() that implicitly uses the THD's
1907// MEM_ROOT for storage, which is nearly always what you want. (The only caller
1908// that does anything else is DynamicRangeIterator.)
1910 THD *thd, AccessPath *path, JOIN *join, bool eligible_for_batch_mode) {
1912 eligible_for_batch_mode);
1913}
1914
1915void SetCostOnTableAccessPath(const Cost_model_server &cost_model,
1916 const POSITION *pos, bool is_after_filter,
1917 AccessPath *path);
1918
1919/**
1920 Return the TABLE* referred from 'path' if it is a basic access path,
1921 else a nullptr is returned. Temporary tables, such as those used by
1922 sorting, aggregate and subquery materialization are not returned.
1923*/
1925
1926/**
1927 Applies the secondary storage engine nrows modification function, if any.
1928
1929 @param params input params for the callback function.
1930 Refer to typedef for the actual input parameters explainations.
1931
1932 @return true if secondary engine has proposed modification to ap's nrows.
1933*/
1935 const SecondaryEngineNrowsParameters &params);
1936
1937/**
1938 Returns whether SecondaryNrows is applicable given the parameters.
1939
1940 @param path access path to be verified.
1941 @param thd current query thd.
1942 @param graph current query block hypergraph.
1943
1944 @return true if nrows hook is applicable.
1945*/
1947 const JoinHypergraph *graph);
1948
1949/**
1950 Returns whether SecondaryNrows hook enabled and is applicable given the
1951 parameters.
1952
1953 @param path access path to be verified.
1954 @param thd current query thd.
1955 @param graph current query block hypergraph.
1956
1957 @return true if nrows hook enabled and is applicable.
1958*/
1960 const JoinHypergraph *graph);
1961
1962/**
1963 Returns a map of all tables read when `path` or any of its children are
1964 executed. Only iterators that are part of the same query block as `path`
1965 are considered.
1966
1967 If a table is read that doesn't have a map, specifically the temporary
1968 tables made as part of materialization within the same query block,
1969 RAND_TABLE_BIT will be set as a convention and none of that access path's
1970 children will be included in the map. In this case, the caller will need to
1971 manually go in and find said access path, to ask it for its TABLE object.
1972
1973 If include_pruned_tables = true, tables that are hidden under a ZERO_ROWS
1974 access path (ie., pruned away due to impossible join conditions) will be
1975 included in the map. This is normally what you want, as those tables need to
1976 be included whenever you store NULL flags and the likes, but if you don't
1977 want them (perhaps to specifically check for conditions referring to pruned
1978 tables), you can set it to false.
1979 */
1980table_map GetUsedTableMap(const AccessPath *path, bool include_pruned_tables);
1981
1982/**
1983 Find the list of all tables used by this root, stopping at materializations.
1984 Used for knowing which tables to sort.
1985 */
1987
1988/**
1989 For each access path in the (sub)tree rooted at “path”, expand any use of
1990 “filter_predicates” into newly-inserted FILTER access paths, using the given
1991 predicate list. This is used after finding an optimal set of access paths,
1992 to normalize the tree so that the remaining consumers do not need to worry
1993 about filter_predicates and cost_before_filter.
1994
1995 “join” is the join that “path” is part of.
1996 */
1997void ExpandFilterAccessPaths(THD *thd, const JoinHypergraph &graph,
1998 AccessPath *path, const JOIN *join);
1999
2000/**
2001 Extracts the Item expression from the given “filter_predicates” corresponding
2002 to the given “mask”.
2003 */
2006 int num_where_predicates);
2007
2008/// Like ExpandFilterAccessPaths(), but expands only the single access path
2009/// at “path”.
2010void ExpandSingleFilterAccessPath(THD *thd, const JoinHypergraph &graph,
2011 AccessPath *path, const JOIN *join);
2012
2013/**
2014 Clear all the bits representing filter predicates in a bitset, and keep only
2015 the bits representing applied sargable join conditions.
2016
2017 See AccessPath::filter_predicates and
2018 AccessPath::applied_sargable_join_predicates() for details about how filter
2019 predicates and applied sargable join predicates are stored in different
2020 partitions of the same bitset.
2021
2022 @param predicates A bitset representing both filter predicates and applied
2023 sargable join predicates.
2024 @param num_where_predicates The number of filter predicates.
2025 @param mem_root The root on which to allocate memory, if needed.
2026
2027 @return A copy of "predicates" with only the bits for applied sargable join
2028 predicates set.
2029 */
2031 int num_where_predicates,
2033
2034/// Returns the tables that are part of a hash join.
2036
2037/**
2038 Get the conditions to put into the extra conditions of the HashJoinIterator.
2039 This includes the non-equijoin conditions, as well as any equijoin conditions
2040 on columns that are too big to include in the hash table. (The old optimizer
2041 handles equijoin conditions on long columns elsewhere, so the last part only
2042 applies to the hypergraph optimizer.)
2043
2044 @param mem_root The root on which to allocate memory, if needed.
2045 @param using_hypergraph_optimizer True if using the hypergraph optimizer.
2046 @param equijoin_conditions All the equijoin conditions of the join.
2047 @param other_conditions All the non-equijoin conditions of the join.
2048
2049 @return All the conditions to evaluate as "extra conditions" in
2050 HashJoinIterator, or nullptr on OOM.
2051 */
2053 MEM_ROOT *mem_root, bool using_hypergraph_optimizer,
2054 const std::vector<HashJoinCondition> &equijoin_conditions,
2055 const Mem_root_array<Item *> &other_conditions);
2056
2057/**
2058 Update status variables which count how many scans of various types are used
2059 in a query plan.
2060
2061 The following status variables are updated: Select_scan, Select_full_join,
2062 Select_range, Select_full_range_join, Select_range_check. They are also stored
2063 as performance schema statement events with the same names.
2064
2065 In addition, the performance schema statement events NO_INDEX_USED and
2066 NO_GOOD_INDEX_USED are updated, if appropriate.
2067 */
2068void CollectStatusVariables(THD *thd, const JOIN *top_join,
2069 const AccessPath &top_path);
2070
2071#endif // SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
constexpr double kUnknownCost
To indicate that a cost estimate is not yet made.
Definition: access_path.h:201
bool ApplySecondaryEngineNrowsHook(const SecondaryEngineNrowsParameters &params)
Applies the secondary storage engine nrows modification function, if any.
Definition: access_path.cc:133
void ExpandSingleFilterAccessPath(THD *thd, const JoinHypergraph &graph, AccessPath *path, const JOIN *join)
Like ExpandFilterAccessPaths(), but expands only the single access path at “path”.
Definition: access_path.cc:1725
AccessPath * NewStreamingAccessPath(THD *thd, AccessPath *child, JOIN *join, Temp_table_param *temp_table_param, TABLE *table, int ref_slice)
Definition: access_path.h:1661
AccessPath * NewInvalidatorAccessPath(THD *thd, AccessPath *child, const char *name)
Definition: access_path.h:1862
AccessPath * NewRemoveDuplicatesAccessPath(THD *thd, AccessPath *child, std::span< Item * > group_items)
Definition: access_path.h:1828
AccessPath * NewRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool reverse, bool count_examined_rows)
Definition: access_path.h:1410
AccessPath * NewDeleteRowsAccessPath(THD *thd, AccessPath *child, table_map delete_tables, table_map immediate_tables)
Definition: access_path.cc:183
void CopyBasicProperties(const AccessPath &from, AccessPath *to)
Definition: access_path.h:1360
AccessPath * NewFullTextSearchAccessPath(THD *thd, TABLE *table, Index_lookup *ref, Item_func_match *ft_func, bool use_order, bool use_limit, bool count_examined_rows)
Definition: access_path.h:1459
double AddRowCount(double c1, double c2)
Add row counts c1 and c2, but handle kUnknownRowCount correctly.
Definition: access_path.h:1770
AccessPath * NewUpdateRowsAccessPath(THD *thd, AccessPath *child, table_map update_tables, table_map immediate_tables)
Definition: access_path.cc:195
table_map GetHashJoinTables(AccessPath *path)
Returns the tables that are part of a hash join.
Definition: access_path.cc:1879
AccessPath * NewDynamicIndexRangeScanAccessPath(THD *thd, TABLE *table, QEP_TAB *qep_tab, bool count_examined_rows)
Definition: access_path.h:1513
AccessPath * NewMaterializeAccessPath(THD *thd, Mem_root_array< MaterializePathParameters::Operand > operands, Mem_root_array< const AccessPath * > *invalidators, TABLE *table, AccessPath *table_path, Common_table_expr *cte, Query_expression *unit, int ref_slice, bool rematerialize, ha_rows limit_rows, bool reject_multiple_rows, MaterializePathParameters::DedupType dedup_reason=MaterializePathParameters::NO_DEDUP)
Definition: access_path.h:1694
AccessPath * NewZeroRowsAggregatedAccessPath(THD *thd, const char *cause)
Definition: access_path.h:1650
AccessPath * NewAlternativeAccessPath(THD *thd, AccessPath *child, AccessPath *table_scan_path, Index_lookup *used_ref)
Definition: access_path.h:1851
AccessPath * NewMRRAccessPath(THD *thd, TABLE *table, Index_lookup *ref, int mrr_flags)
Definition: access_path.h:1490
table_map GetUsedTableMap(const AccessPath *path, bool include_pruned_tables)
Returns a map of all tables read when path or any of its children are executed.
Definition: access_path.cc:432
bool IsSecondaryEngineNrowsHookApplicable(AccessPath *path, THD *thd, const JoinHypergraph *graph)
Returns whether SecondaryNrows is applicable given the parameters.
Definition: access_path.cc:101
AccessPath * NewAppendAccessPath(THD *thd, Mem_root_array< AppendPathParameters > *children)
Definition: access_path.h:1783
AccessPath * NewNestedLoopSemiJoinWithDuplicateRemovalAccessPath(THD *thd, AccessPath *outer, AccessPath *inner, const TABLE *table, KEY *key, size_t key_len)
Definition: access_path.h:1543
std::string_view AccessPathTypeName(AccessPath::Type type)
Return the name of an AccessPath::Type enumerator.
Definition: access_path.cc:333
void CollectStatusVariables(THD *thd, const JOIN *top_join, const AccessPath &top_path)
Update status variables which count how many scans of various types are used in a query plan.
Definition: access_path.cc:1893
AccessPath * NewTemptableAggregateAccessPath(THD *thd, AccessPath *subquery_path, JOIN *join, Temp_table_param *temp_table_param, TABLE *table, AccessPath *table_path, int ref_slice)
Definition: access_path.h:1584
AccessPath * NewRemoveDuplicatesOnIndexAccessPath(THD *thd, AccessPath *child, TABLE *table, KEY *key, unsigned loosescan_key_len)
Definition: access_path.h:1838
AccessPath * NewFakeSingleRowAccessPath(THD *thd, bool count_examined_rows)
Definition: access_path.h:1619
AccessPath * NewTableValueConstructorAccessPath(const THD *thd, const JOIN *join)
Definition: access_path.cc:227
Item * ConditionFromFilterPredicates(const Mem_root_array< Predicate > &predicates, OverflowBitset mask, int num_where_predicates)
Extracts the Item expression from the given “filter_predicates” corresponding to the given “mask”.
Definition: access_path.cc:1714
AccessPath * NewFilterAccessPath(THD *thd, AccessPath *child, Item *condition)
Definition: access_path.h:1558
constexpr double kUnknownRowCount
To indicate that a row estimate is not yet made.
Definition: access_path.h:196
void ExpandFilterAccessPaths(THD *thd, const JoinHypergraph &graph, AccessPath *path, const JOIN *join)
For each access path in the (sub)tree rooted at “path”, expand any use of “filter_predicates” into ne...
Definition: access_path.cc:1861
AccessPath * NewSortAccessPath(THD *thd, AccessPath *child, Filesort *filesort, ORDER *order, bool count_examined_rows)
Definition: access_path.cc:148
bool IsSecondaryNrowsHookEnabledAndApplicable(AccessPath *path, THD *thd, const JoinHypergraph *graph)
Returns whether SecondaryNrows hook enabled and is applicable given the parameters.
Definition: access_path.cc:117
AccessPath * NewMaterializedTableFunctionAccessPath(THD *thd, TABLE *table, Table_function *table_function, AccessPath *table_path)
Definition: access_path.h:1523
const Mem_root_array< Item * > * GetExtraHashJoinConditions(MEM_ROOT *mem_root, bool using_hypergraph_optimizer, const std::vector< HashJoinCondition > &equijoin_conditions, const Mem_root_array< Item * > &other_conditions)
Get the conditions to put into the extra conditions of the HashJoinIterator.
Mem_root_array< TABLE * > CollectTables(THD *thd, AccessPath *root_path)
Find the list of all tables used by this root, stopping at materializations.
Definition: access_path.cc:463
AccessPath * NewTableScanAccessPath(THD *thd, TABLE *table, bool count_examined_rows)
Definition: access_path.h:1377
AccessPath * NewSampleScanAccessPath(THD *thd, TABLE *table, double sampling_percentage, bool count_examined_rows)
Definition: access_path.h:1386
AccessPath * NewAggregateAccessPath(THD *thd, AccessPath *child, olap_type olap)
Definition: access_path.h:1574
double AddCost(double c1, double c2)
Add path costs c1 and c2, but handle kUnknownCost correctly.
Definition: access_path.h:1758
AccessPath * NewMaterializeInformationSchemaTableAccessPath(THD *thd, AccessPath *table_path, Table_ref *table_list, Item *condition)
Definition: access_path.h:1747
void FindTablesToGetRowidFor(AccessPath *path)
Modifies "path" and the paths below it so that they provide row IDs for all tables.
Definition: access_path.cc:1550
TABLE * GetBasicTable(const AccessPath *path)
Return the TABLE* referred from 'path' if it is a basic access path, else a nullptr is returned.
Definition: access_path.cc:276
AccessPath * NewRefOrNullAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool count_examined_rows)
Definition: access_path.h:1423
AccessPath * NewLimitOffsetAccessPath(THD *thd, AccessPath *child, ha_rows limit, ha_rows offset, bool count_all_rows, bool reject_multiple_rows, ha_rows *send_records_override)
Definition: access_path.h:1599
AccessPath * NewEQRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool count_examined_rows)
Definition: access_path.h:1435
AccessPath * NewWindowAccessPath(THD *thd, AccessPath *child, Window *window, Temp_table_param *temp_table_param, int ref_slice, bool needs_buffering)
Definition: access_path.h:1801
double FirstRowCost(double init_cost, double total_cost, double output_rows)
Calculate the cost of reading the first row from an access path, given estimates for init cost,...
Definition: access_path.h:205
AccessPath * NewFollowTailAccessPath(THD *thd, TABLE *table, bool count_examined_rows)
Definition: access_path.h:1504
AccessPath * NewConstTableAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool count_examined_rows)
Definition: access_path.h:1475
MutableOverflowBitset ClearFilterPredicates(OverflowBitset predicates, int num_where_predicates, MEM_ROOT *mem_root)
Clear all the bits representing filter predicates in a bitset, and keep only the bits representing ap...
Definition: access_path.cc:1870
unique_ptr_destroy_only< RowIterator > CreateIteratorFromAccessPath(THD *thd, MEM_ROOT *mem_root, AccessPath *path, JOIN *join, bool eligible_for_batch_mode)
Definition: access_path.cc:699
AccessPath * NewWeedoutAccessPath(THD *thd, AccessPath *child, SJ_TMP_TABLE *weedout_table)
Definition: access_path.h:1817
AccessPath * NewPushedJoinRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool is_unique, bool count_examined_rows)
Definition: access_path.h:1445
AccessPath * NewZeroRowsAccessPath(THD *thd, AccessPath *child, const char *cause)
Definition: access_path.h:1631
AccessPath * NewUnqualifiedCountAccessPath(THD *thd)
Definition: access_path.h:1534
AccessPath * NewIndexScanAccessPath(THD *thd, TABLE *table, int idx, bool use_order, bool reverse, bool count_examined_rows)
Definition: access_path.h:1397
Mem_root_array< MaterializePathParameters::Operand > SingleMaterializeQueryBlock(THD *thd, AccessPath *path, int select_number, JOIN *join, bool copy_items, Temp_table_param *temp_table_param)
Definition: access_path.h:1679
After parsing, a Common Table Expression is accessed through a Table_ref.
Definition: table.h:4580
API for getting cost estimates for server operations that are not directly related to a table object.
Definition: opt_costmodel.h:54
Sorting related info.
Definition: filesort.h:52
A class that represents a join condition in a hash join.
Definition: item_cmpfunc.h:92
Definition: item_func.h:3601
Base class that is used to represent any kind of expression in a relational query.
Definition: item.h:928
Definition: sql_optimizer.h:133
Definition: key.h:113
A typesafe replacement for DYNAMIC_ARRAY.
Definition: mem_root_array.h:432
Definition: overflow_bitset.h:179
Definition: overflow_bitset.h:81
Definition: sql_executor.h:256
Definition: range_optimizer.h:69
This class represents a query expression (one query block or several query blocks combined with UNION...
Definition: sql_lex.h:643
A context for reading through a single table using a chosen access method: index read,...
Definition: row_iterator.h:82
Definition: sql_executor.h:95
For each client connection we create a separate thread with THD serving as a thread/connection descri...
Definition: sql_lexer_thd.h:36
MEM_ROOT * mem_root
Definition: sql_lexer_thd.h:40
Class representing a table function.
Definition: table_function.h:53
Definition: table.h:2933
Object containing parameters used when creating and using temporary tables.
Definition: temp_table_param.h:97
Represents the (explicit) window of a SQL 2003 section 7.11 <window clause>, or the implicit (inlined...
Definition: window.h:110
static MEM_ROOT mem_root
Definition: client_plugin.cc:114
void EstimateLimitOffsetCost(AccessPath *path)
Estimate the costs and row count for a WINDOW AccessPath.
Definition: cost_model.cc:1677
bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator, table_map tables_to_get_rowid_for, ha_rows num_rows_estimate, Filesort_info *fs_info, Sort_result *sort_result, ha_rows *found_rows)
Sort a table.
Definition: filesort.cc:367
void SetCostOnTableAccessPath(const Cost_model_server &cost_model, const POSITION *pos, bool is_after_filter, AccessPath *path)
Definition: sql_executor.cc:2009
std::bitset< kMaxSupportedFDs > FunctionalDependencySet
Definition: interesting_orders_defs.h:63
JoinType
Definition: join_type.h:28
unsigned char byte
Blob class.
Definition: common.h:151
static mi_bit_type mask[]
Definition: mi_packrec.cc:141
This file follows Google coding style, except for the name MEM_ROOT (which is kept for historical rea...
std::unique_ptr< T, Destroy_only< T > > unique_ptr_destroy_only
std::unique_ptr, but only destroying.
Definition: my_alloc.h:480
This file includes constants used by all storage engines.
my_off_t ha_rows
Definition: my_base.h:1217
#define HA_POS_ERROR
Definition: my_base.h:1219
uint64_t table_map
Definition: my_table_map.h:30
static char * path
Definition: mysqldump.cc:150
static PFS_engine_table_share_proxy table
Definition: pfs.cc:61
PT & ref(PT *tp)
Definition: tablespace_impl.cc:359
uint64_t NodeMap
Since our graphs can never have more than 61 tables, node sets and edge lists are implemented using 6...
Definition: node_map.h:40
ValueType value(const std::optional< ValueType > &v)
Definition: gtid.h:83
RangeReverse< Range > reverse(Range &x)
Iterate over a range in reverse.
Definition: utilities.h:132
std::string join(const detail::range auto &rng, std::string_view delim)
join elements of a range into a string separated by a delimiter.
Definition: string.h:74
int delete_tables(PFS_engine_table_share_proxy **, unsigned int) noexcept
Definition: pfs_plugin_table_v1_all_empty.cc:39
olap_type
Definition: olap.h:31
OverflowBitset is a fixed-size (once allocated) bitmap that is optimized for the common case of few e...
required string key
Definition: replication_asynchronous_connection_failover.proto:60
required string type
Definition: replication_group_member_actions.proto:34
case opt name
Definition: sslopt-case.h:29
Access paths are a query planning structure that correspond 1:1 to iterators, in that an access path ...
Definition: access_path.h:238
auto & weedout()
Definition: access_path.h:860
auto & filter()
Definition: access_path.h:780
bool count_all_rows
Definition: access_path.h:1250
AccessPath * bka_path
Definition: access_path.h:1019
struct AccessPath::@67::@97 filter
auto & ref_or_null()
Definition: access_path.h:588
auto & materialized_table_function()
Definition: access_path.h:700
AccessPath * cpk_child
Definition: access_path.h:1080
auto & rowid_union()
Definition: access_path.h:668
hypergraph::NodeMap parameter_tables
If nonzero, a bitmap of other tables whose joined-in rows must already be loaded when rows from this ...
Definition: access_path.h:532
const auto & bka_join() const
Definition: access_path.h:760
struct AccessPath::@67::@93 hash_join
TABLE * temp_table
Definition: access_path.h:1290
OverflowBitset equijoin_predicates
Definition: access_path.h:1190
double rescan_cost() const
Return the cost of scanning the given path for the second time (or later) in the given query block.
Definition: access_path.h:440
auto & materialize()
Definition: access_path.h:828
const auto & temptable_aggregate() const
Definition: access_path.h:808
OverflowBitset & subsumed_sargable_join_predicates()
Similar to applied_sargable_join_predicates, bitmap of sargable join predicates that have been applie...
Definition: access_path.h:504
auto & index_scan()
Definition: access_path.h:564
struct AccessPath::@67::@85 group_index_skip_scan
struct AccessPath::@67::@79 follow_tail
const auto & delete_rows() const
Definition: access_path.h:904
struct AccessPath::@67::@92 zero_rows_aggregated
struct AccessPath::@67::@89 table_value_constructor
const auto & filter() const
Definition: access_path.h:784
struct AccessPath::@67::@84 index_skip_scan
struct AccessPath::@67::@77 const_table
const auto & index_distance_scan() const
Definition: access_path.h:576
auto & delete_rows()
Definition: access_path.h:900
bool reuse_handler
Definition: access_path.h:1057
KEY_PART * used_key_part
Definition: access_path.h:1031
double m_init_cost
Expected cost to initialize this access path; ie., cost to read k out of N rows would be init_cost + ...
Definition: access_path.h:939
olap_type olap
Definition: access_path.h:1236
struct AccessPath::@67::@73 ref_or_null
OverflowBitset & applied_sargable_join_predicates()
Bitmap of sargable join predicates that have already been applied in this access path by means of an ...
Definition: access_path.h:484
Item * condition
Definition: access_path.h:1208
const auto & index_merge() const
Definition: access_path.h:656
const auto & update_rows() const
Definition: access_path.h:912
double subquery_rows
The number of materialized rows (as opposed to the number of rows fetched by table_path).
Definition: access_path.h:1277
enum AccessPath::Type type
auto & alternative()
Definition: access_path.h:884
auto & pushed_join_ref()
Definition: access_path.h:604
void set_cost(double val)
Definition: access_path.h:410
AccessPath * outer
Definition: access_path.h:1164
std::span< Item * > ItemSpan
Definition: access_path.h:1301
auto & index_skip_scan()
Definition: access_path.h:676
bool rewrite_semi_to_inner
Definition: access_path.h:1168
const auto & zero_rows_aggregated() const
Definition: access_path.h:744
auto & group_index_skip_scan()
Definition: access_path.h:684
struct AccessPath::@67::@107 weedout
const auto & eq_ref() const
Definition: access_path.h:600
bool use_order
Definition: access_path.h:975
bool pfs_batch_mode
Definition: access_path.h:1182
auto & temptable_aggregate()
Definition: access_path.h:804
GroupIndexSkipScanParameters * param
Definition: access_path.h:1124
auto & rowid_intersection()
Definition: access_path.h:660
double init_cost() const
Definition: access_path.h:399
bool materialize_subqueries
Definition: access_path.h:1218
AccessPath * child
Definition: access_path.h:1152
bool allow_spill_to_disk
Definition: access_path.h:1166
struct AccessPath::@67::@82 rowid_intersection
struct AccessPath::@67::@83 rowid_union
Index_lookup * used_ref
Definition: access_path.h:1332
auto & mrr()
Definition: access_path.h:628
std::byte m_group_items[sizeof(ItemSpan)]
Definition: access_path.h:1319
auto & sample_scan()
Definition: access_path.h:556
const auto & index_scan() const
Definition: access_path.h:568
const auto & fake_single_row() const
Definition: access_path.h:728
struct AccessPath::@67::@102 stream
bool reject_multiple_rows
Definition: access_path.h:1251
struct AccessPath::@67::@81 index_merge
bool allow_clustered_primary_key_scan
Definition: access_path.h:1072
const char * name
Definition: access_path.h:1336
bool use_limit
Definition: access_path.h:1009
Window * window
Definition: access_path.h:1289
table_map tables_to_get_rowid_for
Definition: access_path.h:1169
auto & materialize_information_schema_table()
Definition: access_path.h:836
auto & bka_join()
Definition: access_path.h:756
struct AccessPath::@67::@96 nested_loop_semijoin_with_duplicate_removal
const auto & follow_tail() const
Definition: access_path.h:640
SJ_TMP_TABLE * weedout_table
Definition: access_path.h:1297
bool has_group_skip_scan
Whether this access path contains a GROUP_INDEX_SKIP_SCAN.
Definition: access_path.h:334
Index_lookup * ref
Definition: access_path.h:986
struct AccessPath::@67::@101 limit_offset
const auto & window() const
Definition: access_path.h:856
void set_init_once_cost(double val)
Definition: access_path.h:422
const auto & materialized_table_function() const
Definition: access_path.h:704
auto & unqualified_count()
Definition: access_path.h:708
bool keep_current_rowid
Definition: access_path.h:1021
bool using_extended_key_parts
Definition: access_path.h:1067
auto & nested_loop_join()
Definition: access_path.h:764
const auto & full_text_search() const
Definition: access_path.h:616
struct AccessPath::@67::@87 materialized_table_function
bool can_be_used_for_ror
Definition: access_path.h:1047
float rec_per_key
Definition: access_path.h:1175
Safety safe_for_rowid
Whether it is safe to get row IDs (for sorting) from this access path.
Definition: access_path.h:325
JOIN * join
Definition: access_path.h:1240
unsigned index
Definition: access_path.h:1043
unsigned mrr_buf_size
Definition: access_path.h:1039
auto & remove_duplicates()
Definition: access_path.h:868
RowIterator * iterator
If an iterator has been instantiated for this access path, points to the iterator.
Definition: access_path.h:395
const auto & stream() const
Definition: access_path.h:824
struct AccessPath::@67::@71 index_distance_scan
const auto & remove_duplicates() const
Definition: access_path.h:872
auto & window()
Definition: access_path.h:852
bool need_rows_in_rowid_order
Definition: access_path.h:1051
Table_ref * table_list
Definition: access_path.h:1281
const auto & unqualified_count() const
Definition: access_path.h:712
void * secondary_engine_data
Auxiliary data used by a secondary storage engine while processing the access path during optimizatio...
Definition: access_path.h:541
struct AccessPath::@67::@68 table_scan
struct AccessPath::@67::@110 alternative
void set_init_cost(double val)
Definition: access_path.h:416
void set_num_output_rows(double val)
Definition: access_path.h:919
bool is_covering
Definition: access_path.h:1100
const auto & mrr() const
Definition: access_path.h:632
void set_cost_before_filter(double val)
Definition: access_path.h:428
bool remove_duplicates
Definition: access_path.h:1230
table_map tables_to_update
Definition: access_path.h:1345
const auto & table_value_constructor() const
Definition: access_path.h:720
KEY * key
Definition: access_path.h:1202
bool HasConsistentCostsAndRows(const JoinHypergraph &graph) const
Return true if costs and row counts are consistent.
Definition: access_path.cc:1971
auto & table_value_constructor()
Definition: access_path.h:716
auto & stream()
Definition: access_path.h:820
union AccessPath::@67 u
size_t key_len
Definition: access_path.h:1203
const OverflowBitset & subsumed_sargable_join_predicates() const
Definition: access_path.h:507
const auto & zero_rows() const
Definition: access_path.h:736
const auto & append() const
Definition: access_path.h:848
ha_rows offset
Definition: access_path.h:1249
int idx
Definition: access_path.h:974
auto & hash_join()
Definition: access_path.h:748
auto & cache_invalidator()
Definition: access_path.h:892
struct AccessPath::@67::@90 fake_single_row
bool force_sort_rowids
Definition: access_path.h:1232
unsigned num_used_key_parts
Definition: access_path.h:1044
unsigned num_ranges
Definition: access_path.h:1036
ORDER * order
Definition: access_path.h:1228
auto & sort()
Definition: access_path.h:788
struct AccessPath::@67::@80 index_range_scan
auto & fake_single_row()
Definition: access_path.h:724
ha_rows limit
Definition: access_path.h:1229
auto & follow_tail()
Definition: access_path.h:636
double m_num_output_rows
Expected number of output rows.
Definition: access_path.h:927
const auto & sample_scan() const
Definition: access_path.h:560
const JoinPredicate * join_predicate
Definition: access_path.h:1165
const auto & const_table() const
Definition: access_path.h:624
const auto & dynamic_index_range_scan() const
Definition: access_path.h:696
const auto & table_scan() const
Definition: access_path.h:552
struct AccessPath::@67::@70 index_scan
unsigned mrr_flags
Definition: access_path.h:1038
int ref_slice
Definition: access_path.h:1244
struct AccessPath::@67::@112 delete_rows
OverflowBitset filter_predicates
Bitmap of WHERE predicates that we are including on this access path, referring to the “predicates” a...
Definition: access_path.h:468
bool can_be_used_for_imerge
Definition: access_path.h:1054
bool forced_by_hint
Definition: access_path.h:1071
auto & limit_offset()
Definition: access_path.h:812
AccessPath * inner
Definition: access_path.h:1164
QUICK_RANGE * range
Definition: access_path.h:981
bool provide_rowid
Definition: access_path.h:1261
const auto & index_range_scan() const
Definition: access_path.h:648
struct AccessPath::@67::@99 aggregate
struct AccessPath::@67::@75 pushed_join_ref
const auto & ref() const
Definition: access_path.h:584
struct AccessPath::@67::@91 zero_rows
bool forced_by_dbug
Whether this access path is forced preferred over all others by means of a SET DEBUG force_subplan_0x...
Definition: access_path.h:339
Item_func_match * ft_func
Definition: access_path.h:1010
ha_rows * send_records_override
Definition: access_path.h:1254
auto & table_scan()
Definition: access_path.h:548
const auto & hash_join() const
Definition: access_path.h:752
QEP_TAB * qep_tab
Definition: access_path.h:1128
Table_function * table_function
Definition: access_path.h:1132
struct AccessPath::@67::@109 remove_duplicates_on_index
auto & zero_rows_aggregated()
Definition: access_path.h:740
const auto & sort() const
Definition: access_path.h:792
bool unwrap_rollup
Definition: access_path.h:1231
struct AccessPath::@67::@74 eq_ref
struct AccessPath::@67::@94 bka_join
struct AccessPath::@67::@78 mrr
Type
Definition: access_path.h:239
@ FOLLOW_TAIL
Definition: access_path.h:254
@ FILTER
Definition: access_path.h:278
@ PUSHED_JOIN_REF
Definition: access_path.h:250
@ ZERO_ROWS_AGGREGATED
Definition: access_path.h:267
@ UPDATE_ROWS
Definition: access_path.h:296
@ AGGREGATE
Definition: access_path.h:280
@ BKA_JOIN
Definition: access_path.h:274
@ ZERO_ROWS
Definition: access_path.h:266
@ CONST_TABLE
Definition: access_path.h:252
@ GROUP_INDEX_SKIP_SCAN
Definition: access_path.h:260
@ SAMPLE_SCAN
Definition: access_path.h:244
@ INDEX_RANGE_SCAN
Definition: access_path.h:255
@ UNQUALIFIED_COUNT
Definition: access_path.h:269
@ EQ_REF
Definition: access_path.h:249
@ FAKE_SINGLE_ROW
Definition: access_path.h:265
@ MATERIALIZE_INFORMATION_SCHEMA_TABLE
Definition: access_path.h:285
@ WINDOW
Definition: access_path.h:287
@ REF_OR_NULL
Definition: access_path.h:248
@ MATERIALIZE
Definition: access_path.h:284
@ NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL
Definition: access_path.h:273
@ ROWID_UNION
Definition: access_path.h:258
@ INDEX_SKIP_SCAN
Definition: access_path.h:259
@ MRR
Definition: access_path.h:253
@ CACHE_INVALIDATOR
Definition: access_path.h:292
@ INDEX_SCAN
Definition: access_path.h:245
@ TABLE_VALUE_CONSTRUCTOR
Definition: access_path.h:264
@ WEEDOUT
Definition: access_path.h:288
@ MATERIALIZED_TABLE_FUNCTION
Definition: access_path.h:268
@ REMOVE_DUPLICATES_ON_INDEX
Definition: access_path.h:290
@ TABLE_SCAN
Definition: access_path.h:243
@ REF
Definition: access_path.h:247
@ TEMPTABLE_AGGREGATE
Definition: access_path.h:281
@ LIMIT_OFFSET
Definition: access_path.h:282
@ APPEND
Definition: access_path.h:286
@ NESTED_LOOP_JOIN
Definition: access_path.h:272
@ INDEX_MERGE
Definition: access_path.h:256
@ FULL_TEXT_SEARCH
Definition: access_path.h:251
@ ALTERNATIVE
Definition: access_path.h:291
@ STREAM
Definition: access_path.h:283
@ REMOVE_DUPLICATES
Definition: access_path.h:289
@ ROWID_INTERSECTION
Definition: access_path.h:257
@ DYNAMIC_INDEX_RANGE_SCAN
Definition: access_path.h:261
@ DELETE_ROWS
Definition: access_path.h:295
@ SORT
Definition: access_path.h:279
@ INDEX_DISTANCE_SCAN
Definition: access_path.h:246
@ HASH_JOIN
Definition: access_path.h:275
bool retrieve_full_rows
Definition: access_path.h:1083
double m_init_once_cost
Of init_cost, how much of the initialization needs only to be done once per query block.
Definition: access_path.h:951
struct AccessPath::@67::@86 dynamic_index_range_scan
const auto & rowid_union() const
Definition: access_path.h:672
const TABLE * table
Definition: access_path.h:1201
const auto & index_skip_scan() const
Definition: access_path.h:680
auto & dynamic_index_range_scan()
Definition: access_path.h:692
const auto & pushed_join_ref() const
Definition: access_path.h:608
struct AccessPath::@67::@111 cache_invalidator
auto & remove_duplicates_on_index()
Definition: access_path.h:876
table_map immediate_tables
Definition: access_path.h:1341
double m_cost_before_filter
If no filter, identical to cost.
Definition: access_path.h:955
double cost() const
Definition: access_path.h:397
struct AccessPath::@67::@88 unqualified_count
int mrr_flags
Definition: access_path.h:1020
auto & full_text_search()
Definition: access_path.h:612
const auto & materialize() const
Definition: access_path.h:832
int ordering_state
Which ordering the rows produced by this path follow, if any (see interesting_orders....
Definition: access_path.h:389
auto & eq_ref()
Definition: access_path.h:596
JoinType join_type
Definition: access_path.h:1173
const auto & limit_offset() const
Definition: access_path.h:816
MaterializePathParameters * param
Definition: access_path.h:1272
const char * cause
Definition: access_path.h:1155
auto & update_rows()
Definition: access_path.h:908
auto & index_merge()
Definition: access_path.h:652
auto & aggregate()
Definition: access_path.h:796
struct AccessPath::@67::@104 materialize_information_schema_table
double m_cost
Expected cost to read all of this access path once.
Definition: access_path.h:930
const auto & group_index_skip_scan() const
Definition: access_path.h:688
auto & zero_rows()
Definition: access_path.h:732
AccessPath * subquery_path
Definition: access_path.h:1239
Mem_root_array< AppendPathParameters > * children
Definition: access_path.h:1285
const auto & nested_loop_join() const
Definition: access_path.h:768
bool already_expanded_predicates
Definition: access_path.h:1183
const auto & remove_duplicates_on_index() const
Definition: access_path.h:880
Temp_table_param * temp_table_param
Definition: access_path.h:1241
auto & nested_loop_semijoin_with_duplicate_removal()
Definition: access_path.h:772
bool reverse
Definition: access_path.h:976
AccessPath * table_scan_path
Definition: access_path.h:1328
struct AccessPath::@67::@113 update_rows
struct AccessPath::@67::@98 sort
enum tablesample_type sampling_type
Definition: access_path.h:970
const auto & materialize_information_schema_table() const
Definition: access_path.h:840
double subquery_cost
The total cost of executing the queries that we materialize.
Definition: access_path.h:1274
bool geometry
Definition: access_path.h:1060
bool count_examined_rows
Whether this access path counts as one that scans a base table, and thus should be counted towards ex...
Definition: access_path.h:331
unsigned loosescan_key_len
Definition: access_path.h:1325
auto & index_distance_scan()
Definition: access_path.h:572
auto & ref()
Definition: access_path.h:580
struct AccessPath::@67::@100 temptable_aggregate
double first_row_cost() const
The cost of reading the first row.
Definition: access_path.h:402
table_map tables_to_delete_from
Definition: access_path.h:1340
struct AccessPath::@67::@69 sample_scan
double init_once_cost() const
Definition: access_path.h:406
IndexSkipScanParameters * param
Definition: access_path.h:1115
const OverflowBitset & applied_sargable_join_predicates() const
Definition: access_path.h:487
Mem_root_array< AccessPath * > * children
Definition: access_path.h:1073
OverflowBitset delayed_predicates
Bitmap of WHERE predicates that touch tables we have joined in, but that we could not apply yet (for ...
Definition: access_path.h:496
AccessPath * table_path
Definition: access_path.h:1133
const auto & aggregate() const
Definition: access_path.h:800
TABLE * table
Definition: access_path.h:965
auto & append()
Definition: access_path.h:844
double cost_before_filter() const
Definition: access_path.h:408
const auto & weedout() const
Definition: access_path.h:864
const auto & cache_invalidator() const
Definition: access_path.h:896
double sampling_percentage
Definition: access_path.h:969
bool needs_buffering
Definition: access_path.h:1293
int8_t immediate_update_delete_table
For UPDATE and DELETE statements: The node index of a table which can be updated or deleted from imme...
Definition: access_path.h:383
QUICK_RANGE ** ranges
Definition: access_path.h:1035
bool is_unique
Definition: access_path.h:1003
const auto & alternative() const
Definition: access_path.h:888
Safety
A general enum to describe the safety of a given operation.
Definition: access_path.h:306
@ SAFE_IF_SCANNED_ONCE
The given operation is safe if this access path is scanned once, but not if it's scanned multiple tim...
Definition: access_path.h:315
@ UNSAFE
The given operation is unsafe on this access path, no matter how many or few times it's scanned.
Definition: access_path.h:321
@ SAFE
The given operation is always safe on this access path.
Definition: access_path.h:308
struct AccessPath::@67::@103 materialize
auto & const_table()
Definition: access_path.h:620
unsigned mrr_length_per_rec
Definition: access_path.h:1174
struct AccessPath::@67::@76 full_text_search
const auto & rowid_intersection() const
Definition: access_path.h:664
Mem_root_array< Item_values_column * > * output_refs
Definition: access_path.h:1139
auto & index_range_scan()
Definition: access_path.h:644
double num_output_rows() const
Definition: access_path.h:917
double num_output_rows_before_filter
If no filter, identical to num_output_rows.
Definition: access_path.h:446
size_t signature
Signature used to uniquely identify the access path.
Definition: access_path.h:545
const auto & ref_or_null() const
Definition: access_path.h:592
struct AccessPath::@67::@95 nested_loop_join
Filesort * filesort
Definition: access_path.h:1222
struct AccessPath::@67::@105 append
bool store_rowids
Definition: access_path.h:1167
const auto & nested_loop_semijoin_with_duplicate_removal() const
Definition: access_path.h:776
Definition: access_path.h:190
JOIN * join
Definition: access_path.h:192
AccessPath * path
Definition: access_path.h:191
Definition: group_index_skip_scan_plan.h:45
Logically a part of AccessPath::index_skip_scan(), but is too large, so split out into its own struct...
Definition: index_skip_scan_plan.h:73
Structure used for index-based lookups.
Definition: sql_opt_exec_shared.h:67
A struct containing a join hypergraph of a single query block, encapsulating the constraints given by...
Definition: make_join_hypergraph.h:98
A specification that two specific relational expressions (e.g., two tables, or a table and a join bet...
Definition: access_path.h:80
FunctionalDependencySet functional_dependencies
Definition: access_path.h:97
Mem_root_array< int > functional_dependencies_idx
Definition: access_path.h:102
RelationalExpression * expr
Definition: access_path.h:81
double selectivity
Definition: access_path.h:82
int ordering_idx_needed_for_semijoin_rewrite
Definition: access_path.h:120
std::span< Item * > semijoin_group
Definition: access_path.h:125
size_t estimated_bytes_per_row
Definition: access_path.h:86
Definition: range_optimizer.h:55
The MEM_ROOT is a simple arena, where allocations are carved out of larger blocks.
Definition: my_alloc.h:83
Definition: materialize_path_parameters.h:42
AccessPath * subquery_path
Definition: materialize_path_parameters.h:43
int select_number
Definition: materialize_path_parameters.h:44
JOIN * join
Definition: materialize_path_parameters.h:45
bool copy_items
Definition: materialize_path_parameters.h:47
Temp_table_param * temp_table_param
Definition: materialize_path_parameters.h:48
bool disable_deduplication_by_hash_field
Definition: materialize_path_parameters.h:46
Definition: materialize_path_parameters.h:40
bool rematerialize
True if rematerializing on every Init() call (e.g., because we have a dependency on a value from outs...
Definition: materialize_path_parameters.h:80
DedupType deduplication_reason
Definition: materialize_path_parameters.h:111
Mem_root_array< Operand > m_operands
Definition: materialize_path_parameters.h:58
Common_table_expr * cte
If materializing a CTE, points to it (see m_cte), otherwise nullptr.
Definition: materialize_path_parameters.h:65
DedupType
The context for which deduplication is being used.
Definition: materialize_path_parameters.h:106
@ NO_DEDUP
Definition: materialize_path_parameters.h:109
bool reject_multiple_rows
True if this is the top level iterator for a materialized derived table transformed from a scalar sub...
Definition: materialize_path_parameters.h:99
ha_rows limit_rows
Used for when pushing LIMIT down to MaterializeIterator; this is more efficient than having a LimitOf...
Definition: materialize_path_parameters.h:92
TABLE * table
Handle to table to materialize into.
Definition: materialize_path_parameters.h:62
int ref_slice
Definition: materialize_path_parameters.h:74
Query_expression * unit
The query expression we are materializing.
Definition: materialize_path_parameters.h:68
Mem_root_array< const AccessPath * > * invalidators
Definition: materialize_path_parameters.h:59
Definition: table.h:298
A position of table within a join order.
Definition: sql_select.h:355
A filter of some sort that is not a join condition (those are stored in JoinPredicate objects).
Definition: access_path.h:133
hypergraph::NodeMap total_eligibility_set
Definition: access_path.h:146
bool was_join_condition
Definition: access_path.h:159
Mem_root_array< int > functional_dependencies_idx
Definition: access_path.h:182
bool possibly_null_complemented_later
Definition: access_path.h:170
FunctionalDependencySet functional_dependencies
Definition: access_path.h:181
int source_multiple_equality_idx
Definition: access_path.h:178
hypergraph::NodeMap used_nodes
Definition: access_path.h:137
Item * condition
Definition: access_path.h:134
double selectivity
Definition: access_path.h:148
Mem_root_array< ContainedSubquery > contained_subqueries
Definition: access_path.h:187
Represents an expression tree in the relational algebra of joins.
Definition: relational_expression.h:155
Type for signature generation and for retrieving nrows estimate from secondary engine for current Acc...
Definition: handler.h:2477
Definition: table.h:1435
tablesample_type
Definition: tablesample.h:27