MySQL 9.6.0
Source Code Documentation
access_path.h
1/* Copyright (c) 2020, 2025, Oracle and/or its affiliates.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License, version 2.0,
5 as published by the Free Software Foundation.
6
7 This program is designed to work with certain software (including
8 but not limited to OpenSSL) that is licensed under separate terms,
9 as designated in a particular file or component or in included license
10 documentation. The authors of MySQL hereby grant you an additional
11 permission to link the program and your derivative works with the
12 separately licensed software that they have either included with
13 the program or referenced in the documentation.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License, version 2.0, for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
23
24#ifndef SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
25#define SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
26
27#include <assert.h>
28#include <stdint.h>
29#include <cmath>
30#include <cstddef>
31#include <span>
32#include <type_traits>
33#include <utility>
34#include <vector>
35
36#include "my_alloc.h"
37#include "my_base.h"
38#include "my_table_map.h"
39#include "sql/item.h"
40// IWYU suggests removing row_iterator.h, but then the inlined short form of
41// CreateIteratorFromAccessPath() fails to compile. So use a pragma to keep it.
42#include "sql/iterators/row_iterator.h" // IWYU pragma: keep
47#include "sql/join_type.h"
48#include "sql/mem_root_array.h"
49#include "sql/olap.h"
50#include "sql/sql_class.h"
51#include "sql/table.h"
52
54class Filesort;
56class Item_func_match;
57class JOIN;
58class KEY;
60class QEP_TAB;
61class QUICK_RANGE;
62class SJ_TMP_TABLE;
63class Table_function;
65class Window;
66struct AccessPath;
69struct Index_lookup;
70struct KEY_PART;
71struct POSITION;
73
74/**
75 A specification that two specific relational expressions
76 (e.g., two tables, or a table and a join between two other tables)
77 should be joined together. The actual join conditions, if any,
78 live inside the “expr” object, as does the join type etc.
79 */
83
84 // If this join is made using a hash join, estimates the width
85 // of each row as stored in the hash table, in bytes.
87
88 // The set of (additional) functional dependencies that are active
89 // after this join predicate has been applied. E.g. if we're joining
90 // on t1.x = t2.x, there will be a bit for that functional dependency.
91 // We don't currently support more complex join conditions, but there's
92 // no conceptual reason why we couldn't, e.g. a join on a = b + c
93 // could give rise to the FD {b, c} → a and possibly even {a, b} → c
94 // or {a, c} → b.
95 //
96 // Used in the processing of interesting orders.
 97 FunctionalDependencySet functional_dependencies;
 98
99 // A less compact form of functional_dependencies, used during building
100 // (FunctionalDependencySet bitmaps are only available after all functional
101 // indexes have been collected and Build() has been called).
103
104 // A semijoin on the following format:
105 //
106 // SELECT ... FROM t1 WHERE EXISTS
107 // (SELECT ... FROM t2 WHERE t1.f1=t2.f2 AND t1.f3=t2.f4 ... t1.fn=t2.fm)
108 //
109 // may be transformed into an equivalent inner join:
110 //
111 // SELECT ... FROM (SELECT DISTINCT f2, f4...fm FROM t2) d JOIN t1
112 // ON t1.f1=d.f2 AND t1.f3=d.f4 ... t1.fn=d.fm
113 //
 114 // If this is a suitable semijoin: This field will identify the
115 // grouping given by (f2, f4..fm). (@see
116 // LogicalOrderings::RemapOrderingIndex() for a description of how this
117 // value can be mapped to an actual ordering). The join
118 // optimizer will then consider deduplicating on it and applying the
119 // above transform. If no such grouping was found, this field will be -1.
 120 int ordering_idx_needed_for_semijoin_rewrite = -1;
 121
122 // Same as ordering_idx_needed_for_semijoin_rewrite, but given to the
123 // RemoveDuplicatesIterator for doing the actual grouping. Allocated
124 // on the MEM_ROOT. Can be empty, in which case a LIMIT 1 would do.
125 std::span<Item *> semijoin_group{};
126};
127
128/**
129 A filter of some sort that is not a join condition (those are stored
130 in JoinPredicate objects). AND conditions are typically split up into
131 multiple Predicates.
132 */
133struct Predicate {
 134 Item *condition;
 135
136 // condition->used_tables(), converted to a NodeMap.
138
139 // tables referred to by the condition, plus any tables whose values
140 // can null any of those tables. (Even when reordering outer joins,
141 // at least one of those tables will still be present on the
142 // left-hand side of the outer join, so this is sufficient.)
143 //
144 // As a special case, we allow setting RAND_TABLE_BIT, even though it
145 // is normally part of a table_map, not a NodeMap.
147
149
150 // Whether this predicate is a join condition after all; it was promoted
151 // to a WHERE predicate since it was part of a cycle (see the comment in
152 // AddCycleEdges()). If it is, it is usually ignored so that we don't
153 // double-apply join conditions -- but if the join in question was not
154 // applied (because the cycle was broken at this point), the predicate
155 // would come into play. This is normally registered on the join itself
156 // (see RelationalExpression::join_predicate_bitmap), but having the bit
157 // on the predicate itself is used to avoid trying to push it down as a
158 // sargable predicate.
159 bool was_join_condition = false;
160
161 // Whether this predicate references tables that could be NULL-complemented
162 // later by an outer join. This could for example be true for degenerate outer
163 // join conditions that are pushed down as a table filter on one of the inner
164 // tables, or for join conditions in inner joins that are on the inner side of
165 // an outer join.
166 //
167 // We keep track of this here in order to prevent collection of functional
168 // dependencies from such predicates if the functional dependencies are not
169 // valid after the outer join.
171
172 // If this is a join condition that came from a multiple equality,
173 // and we have decided to create a mesh from that multiple equality,
174 // returns the index of it into the “multiple_equalities” array
175 // in MakeJoinHypergraph(). (You don't actually need the array to
176 // use this; it's just an opaque index to deduplicate between different
177 // predicates.) Otherwise, -1.
179
180 // See the equivalent fields in JoinPredicate.
183
184 // The list of all subqueries referred to in this predicate, if any.
185 // The optimizer uses this to add their materialized/non-materialized
186 // costs when evaluating filters.
188};
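// Illustrative note (not part of the original header): a conjunctive WHERE
// clause such as
//
//   WHERE t1.a = 1 AND t2.b > 2
//
// is typically split into two Predicate objects, one per conjunct, so that
// the join optimizer can place each filter independently at the lowest
// possible point in the join tree.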
189
190struct AppendPathParameters {
 191 AccessPath *path;
 192 JOIN *join;
193};
194
195/// To indicate that a row estimate is not yet made.
196inline constexpr double kUnknownRowCount = -1.0;
197
198/// To indicate that a cost estimate is not yet made. We use a large negative
199/// value to avoid getting a positive result if we by mistake add this to
200/// a real (positive) cost.
201inline constexpr double kUnknownCost = -1e12;
202
203/// Calculate the cost of reading the first row from an access path, given
204/// estimates for init cost, total cost and the number of rows returned.
205inline double FirstRowCost(double init_cost, double total_cost,
206 double output_rows) {
207 assert(init_cost >= 0.0);
208 assert(total_cost >= init_cost);
209 assert(output_rows >= 0.0);
210 if (output_rows <= 1.0) {
211 return total_cost;
212 }
213 return init_cost + (total_cost - init_cost) / output_rows;
214}
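// Worked example (illustrative, not part of the original header): with
// init_cost = 10.0, total_cost = 110.0 and output_rows = 100.0, the cost of
// producing the first row is estimated as 10 + (110 - 10) / 100 = 11.
// For output_rows <= 1, the function returns total_cost, since we cannot
// expect to stop any earlier than after reading everything.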
215
216/**
 217 Access paths are a query planning structure that corresponds 1:1 to iterators,
 218 in that an access path contains pretty much exactly the information
 219 needed to instantiate a given iterator, plus some information that is only
 220 needed during planning, such as costs. (The new join optimizer will extend
 221 this somewhat in the future. Some iterators also need the query block
 222 (ie., the JOIN object) they are part of, but that is implicitly available when
223 constructing the tree.)
224
225 AccessPath objects build on a variant, ie., they can hold an access path of
226 any type (table scan, filter, hash join, sort, etc.), although only one at the
227 same time. Currently, they contain 32 bytes of base information that is common
228 to any access path (type identifier, costs, etc.), and then up to 40 bytes
229 that is type-specific (e.g. for a table scan, the TABLE object). It would be
230 nice if we could squeeze it down to 64 and fit a cache line exactly, but it
231 does not seem to be easy without fairly large contortions.
232
233 We could have solved this by inheritance, but the fixed-size design makes it
234 possible to replace an access path when a better one is found, without
235 introducing a new allocation, which will be important when using them as a
236 planning structure.
237 */
238struct AccessPath {
 239 enum Type : uint8_t {
 240 // Basic access paths (those with no children, at least nominally).
 241 // NOTE: When adding more paths to this section, also update GetBasicTable()
 242 // to handle them.
 243 TABLE_SCAN,
 244 SAMPLE_SCAN,
 245 INDEX_SCAN,
 246 INDEX_DISTANCE_SCAN,
 247 REF,
 248 REF_OR_NULL,
 249 EQ_REF,
 250 PUSHED_JOIN_REF,
 251 FULL_TEXT_SEARCH,
 252 CONST_TABLE,
 253 MRR,
 254 FOLLOW_TAIL,
 255 INDEX_RANGE_SCAN,
 256 INDEX_MERGE,
 257 ROWID_INTERSECTION,
 258 ROWID_UNION,
 259 INDEX_SKIP_SCAN,
 260 GROUP_INDEX_SKIP_SCAN,
 261 DYNAMIC_INDEX_RANGE_SCAN,
 262
 263 // Basic access paths that don't correspond to a specific table.
 264 MATERIALIZED_TABLE_FUNCTION,
 265 UNQUALIFIED_COUNT,
 266 TABLE_VALUE_CONSTRUCTOR,
 267 FAKE_SINGLE_ROW,
 268 ZERO_ROWS,
 269 ZERO_ROWS_AGGREGATED,
 270
 271 // Joins.
 272 HASH_JOIN,
 273 BKA_JOIN,
 274 NESTED_LOOP_JOIN,
 275 NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL,
 276
 277 // Composite access paths.
 278 FILTER,
 279 SORT,
 280 AGGREGATE,
 281 TEMPTABLE_AGGREGATE,
 282 LIMIT_OFFSET,
 283 STREAM,
 284 MATERIALIZE,
 285 MATERIALIZE_INFORMATION_SCHEMA_TABLE,
 286 APPEND,
 287 WINDOW,
 288 WEEDOUT,
 289 REMOVE_DUPLICATES,
 290 REMOVE_DUPLICATES_ON_INDEX,
 291 ALTERNATIVE,
 292 CACHE_INVALIDATOR,
 293
 294 // Access paths that modify tables.
 295 DELETE_ROWS,
 296 UPDATE_ROWS,
 297 } type;
 298
299 /// A general enum to describe the safety of a given operation.
300 /// Currently we only use this to describe row IDs, but it can easily
301 /// be reused for safety of updating a table we're reading from
302 /// (the Halloween problem), or just generally unreproducible results
303 /// (e.g. a TABLESAMPLE changing due to external factors).
304 ///
305 /// Less safe values have higher numerical values.
306 enum Safety : uint8_t {
307 /// The given operation is always safe on this access path.
308 SAFE = 0,
309
310 /// The given operation is safe if this access path is scanned once,
311 /// but not if it's scanned multiple times (e.g. used on the inner side
312 /// of a nested-loop join). A typical example of this is a derived table
313 /// or CTE that is rematerialized on each scan, so that references to
314 /// the old values (such as row IDs) are no longer valid.
 315 SAFE_IF_SCANNED_ONCE = 1,
 316
317 /// The given operation is unsafe on this access path, no matter how many
318 /// or few times it's scanned. Often, it may help to materialize it
319 /// (assuming the materialization itself doesn't use the operation
320 /// in question).
321 UNSAFE = 2
322 };
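// Illustrative example (not part of the original header): a derived table or
// CTE that is rematerialized on every scan is SAFE_IF_SCANNED_ONCE with
// respect to row IDs; row IDs captured during one scan would dangle after
// the next rematerialization, so such a path must not be scanned multiple
// times (e.g. on the inner side of a nested-loop join) if row IDs are used.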
323
324 /// Whether it is safe to get row IDs (for sorting) from this access path.
 325 Safety safe_for_rowid = SAFE;
 326
327 /// Whether this access path counts as one that scans a base table,
328 /// and thus should be counted towards examined_rows. It can sometimes
329 /// seem a bit arbitrary which iterators count towards examined_rows
330 /// and which ones do not, so the only canonical reference is the tests.
331 bool count_examined_rows : 1 {false};
332
333 /// Whether this access path contains a GROUP_INDEX_SKIP_SCAN
334 bool has_group_skip_scan : 1 {false};
335
336#ifndef NDEBUG
337 /// Whether this access path is forced preferred over all others by means
338 /// of a SET DEBUG force_subplan_0x... statement.
339 bool forced_by_dbug : 1 {false};
340#endif
341
342 /// For UPDATE and DELETE statements: The node index of a table which can be
343 /// updated or deleted from immediately as the rows are read from the
344 /// iterator, if this path is only read from once. -1 if there is no such
345 /// table in this path.
346 ///
347 /// Note that this is an index into CostingReceiver's array of nodes, and is
348 /// not necessarily equal to the table number within the query block given by
349 /// Table_ref::tableno().
350 ///
351 /// The table, if any, is currently always the outermost table in the path.
352 ///
353 /// It is possible to have plans where it would be safe to operate
354 /// "immediately" on more than one table. For example, if we do a merge join,
355 /// it is safe to perform immediate deletes on tables on the inner side of the
356 /// join, since both sides are read only once. (However, we currently do not
357 /// support merge joins.)
358 ///
359 /// Another possibility is when the outer table of a nested loop join is
360 /// guaranteed to return at most one row (typically, a unique index lookup
361 /// aka. eq_ref). Then it's safe to delete immediately from both sides of the
 362 /// nested loop join. But we don't do this yet.
363 ///
 364 /// Hash joins read both sides exactly once. However, with hash joins, the
365 /// scans on the inner tables are not positioned on the correct row when the
366 /// result of the join is returned, so the immediate delete logic will need to
367 /// be changed to reposition the underlying scans before doing the immediate
368 /// deletes. While this can be done, it makes the benefit of immediate deletes
369 /// less obvious for these tables, and it can also be a loss in some cases,
370 /// because we lose the deduplication provided by the Unique object used for
371 /// buffered deletes (the immediate deletes could end up spending time
372 /// repositioning to already deleted rows). So we currently don't attempt to
373 /// do immediate deletes from inner tables of hash joins either.
374 ///
375 /// The outer table of a hash join can be deleted from immediately if the
376 /// inner table fits in memory. If the hash join spills to disk, though,
377 /// neither the rows of the outer table nor the rows of the inner table come
378 /// out in the order of the underlying scan, so it is not safe in general to
379 /// perform immediate deletes on the outer table of a hash join.
380 ///
381 /// If support for immediate operations on multiple tables is added,
382 /// this member could be changed from a node index to a NodeMap.
384
385 /// Which ordering the rows produced by this path follow, if any
386 /// (see interesting_orders.h). This is really a LogicalOrderings::StateIndex,
387 /// but we don't want to add a dependency on interesting_orders.h from
388 /// this file, so we use the base type instead of the typedef here.
390
391 /// If an iterator has been instantiated for this access path, points to the
392 /// iterator. Used for constructing iterators that need to talk to each other
393 /// (e.g. for recursive CTEs, or BKA join), and also for locating timing
394 /// information in EXPLAIN ANALYZE queries.
 395 RowIterator *iterator = nullptr;
 396
397 double cost() const { return m_cost; }
398
399 double init_cost() const { return m_init_cost; }
400
401 /// The cost of reading the first row.
402 double first_row_cost() const {
 403 return FirstRowCost(m_init_cost, m_cost, m_num_output_rows);
 404 }
405
406 double init_once_cost() const { return m_init_once_cost; }
407
408 double cost_before_filter() const { return m_cost_before_filter; }
409
410 void set_cost(double val) {
411 assert(std::isfinite(val));
412 assert(val >= 0.0 || val == kUnknownCost);
413 m_cost = val;
414 }
415
416 void set_init_cost(double val) {
417 assert(std::isfinite(val));
418 assert(val >= 0.0 || val == kUnknownCost);
419 m_init_cost = val;
420 }
421
422 void set_init_once_cost(double val) {
423 assert(std::isfinite(val));
424 assert(val >= 0.0);
425 m_init_once_cost = val;
426 }
427
428 void set_cost_before_filter(double val) {
429 assert(std::isfinite(val));
430 assert(val >= 0.0 || val == kUnknownCost);
 431 m_cost_before_filter = val;
 432 }
433
434 /// Return the cost of scanning the given path for the second time
435 /// (or later) in the given query block. This is really the interesting
436 /// metric, not init_once_cost in itself, but since nearly all paths
437 /// have zero init_once_cost, storing that instead allows us to skip
438 /// a lot of repeated path->init_once_cost = path->init_cost calls
439 /// in the code.
440 double rescan_cost() const { return cost() - init_once_cost(); }
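// Worked example (illustrative, not part of the original header): a
// materialized table with rematerialize=false, cost() = 120.0 and
// init_once_cost() = 100.0 (the one-time cost of filling the temporary
// table) has rescan_cost() = 120 - 100 = 20; the second and later scans in
// the same query block only pay for reading the rows back.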
441
442 /// Return true if costs and row counts are consistent.
443 bool HasConsistentCostsAndRows(const JoinHypergraph &graph) const;
444
445 /// If no filter, identical to num_output_rows.
 446 double num_output_rows_before_filter{kUnknownRowCount};
 447
448 /// Bitmap of WHERE predicates that we are including on this access path,
449 /// referring to the “predicates” array internal to the join optimizer.
450 /// Since bit masks are much cheaper to deal with than creating Item
451 /// objects, and we don't invent new conditions during join optimization
452 /// (all of them are known when we begin optimization), we stick to
453 /// manipulating bit masks during optimization, saying which filters will be
454 /// applied at this node (a 1-bit means the filter will be applied here; if
455 /// there are multiple ones, they are ANDed together).
456 ///
457 /// This is used during join optimization only; before iterators are
458 /// created, we will add FILTER access paths to represent these instead,
459 /// removing the dependency on the array. Said FILTER paths are by
460 /// convention created with materialize_subqueries = false, since the by far
461 /// most common case is that there are no subqueries in the predicate.
462 /// In other words, if you wish to represent a filter with
463 /// materialize_subqueries = true, you will need to make an explicit FILTER
464 /// node.
465 ///
466 /// See also nested_loop_join().equijoin_predicates, which is for filters
467 /// being applied _before_ nested-loop joins, but is otherwise the same idea.
468 ///
469 /// Note that the higher bits of this bitset, those starting at the position
470 /// given by JoinHypergraph::num_where_predicates, do not represent filter
471 /// predicates, but rather applied sargable join predicates. @see
472 /// #applied_sargable_join_predicates() for more details.
 473 OverflowBitset filter_predicates{0};
 474
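// Illustrative example (not part of the original header): if the join
// optimizer's "predicates" array holds {p0: t1.x > 0, p1: t1.y = t2.y}, an
// access path that has joined in only t1 could have bit 0 set in
// filter_predicates (p0 is applied here), while p1 must wait until t2 has
// been joined in.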
475 /// Bitmap of sargable join predicates that have already been applied
476 /// in this access path by means of an index lookup (ref access),
477 /// again referring to “predicates”, and thus should not be counted again
478 /// for selectivity. Note that the filter may need to be applied
479 /// nevertheless (especially in case of type conversions); see
480 /// subsumed_sargable_join_predicates.
481 ///
482 /// Since these refer to the same array as filter_predicates, they will
483 /// never overlap with filter_predicates, and so we can reuse the same
484 /// memory using an alias (a union would not be allowed, since OverflowBitset
485 /// is a class with non-trivial default constructor), even though the meaning
486 /// is entirely separate. If N = num_where_predicates in the hypergraph, then
487 /// bits 0..(N-1) belong to filter_predicates, and the rest to
488 /// applied_sargable_join_predicates.
 489 OverflowBitset &applied_sargable_join_predicates() {
 490 return filter_predicates;
491 }
 492 const OverflowBitset &applied_sargable_join_predicates() const {
 493 return filter_predicates;
494 }
495
496 /// Bitmap of WHERE predicates that touch tables we have joined in,
497 /// but that we could not apply yet (for instance because they reference
 498 /// other tables, or because we could not push them down into
499 /// the nullable side of outer joins). Used during planning only
500 /// (see filter_predicates).
501 ///
502 /// Note that the higher bits of this bitset, those starting at the position
503 /// given by JoinHypergraph::num_where_predicates, do not represent delayed
504 /// predicates, but rather subsumed sargable join predicates. @see
505 /// #subsumed_sargable_join_predicates() for more details.
 506 OverflowBitset delayed_predicates{0};
 507
508 /// Similar to applied_sargable_join_predicates, bitmap of sargable
509 /// join predicates that have been applied and will subsume the join
510 /// predicate entirely, ie., not only should the selectivity not be
511 /// double-counted, but the predicate itself is redundant and need not
512 /// be applied as a filter. (It is an error to have a bit set here but not
513 /// in applied_sargable_join_predicates.)
 514 OverflowBitset &subsumed_sargable_join_predicates() {
 515 return delayed_predicates;
516 }
 517 const OverflowBitset &subsumed_sargable_join_predicates() const {
 518 return delayed_predicates;
519 }
520
521 /// If nonzero, a bitmap of other tables whose joined-in rows must already be
522 /// loaded when rows from this access path are evaluated; that is, this
523 /// access path must be put on the inner side of a nested-loop join (or
524 /// multiple such joins) where the outer side includes all of the given
525 /// tables.
526 ///
527 /// The most obvious case for this is dependent tables in LATERAL, but a more
528 /// common case is when we have pushed join conditions referring to those
529 /// tables; e.g., if this access path represents t1 and we have a condition
530 /// t1.x=t2.x that is pushed down into an index lookup (ref access), t2 will
531 /// be set in this bitmap. We can still join in other tables, deferring t2,
532 /// but the bit(s) will then propagate, and we cannot be on the right side of
533 /// a hash join until parameter_tables is zero again. (Also see
534 /// DisallowParameterizedJoinPath() for when we disallow such deferring,
535 /// as an optimization.)
536 ///
537 /// As a special case, we allow setting RAND_TABLE_BIT, even though it
538 /// is normally part of a table_map, not a NodeMap. In this case, it specifies
539 /// that the access path is entirely noncachable, because it depends on
540 /// something nondeterministic or an outer reference, and thus can never be on
541 /// the right side of a hash join, ever.
 542 NodeMap parameter_tables{0};
 543
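// Illustrative example (not part of the original header): if this path is a
// ref access on t1 whose index lookup uses the pushed condition t1.x = t2.x,
// then t2's bit is set in parameter_tables, and this path can only appear on
// the inner side of a nested-loop join whose outer side has already joined
// in t2.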
544 /// Auxiliary data used by a secondary storage engine while processing the
545 /// access path during optimization and execution. The secondary storage
546 /// engine is free to store any useful information in this member, for example
547 /// extra statistics or cost estimates. The data pointed to is fully owned by
548 /// the secondary storage engine, and it is the responsibility of the
549 /// secondary engine to manage the memory and make sure it is properly
550 /// destroyed.
551 void *secondary_engine_data{nullptr};
552
553 /// Signature used to uniquely identify the access path.
554 /// 0 meaning non-initialized.
555 size_t signature{0};
556
557 // Accessors for the union below.
558 auto &table_scan() {
559 assert(type == TABLE_SCAN);
560 return u.table_scan;
561 }
562 const auto &table_scan() const {
563 assert(type == TABLE_SCAN);
564 return u.table_scan;
565 }
566 auto &sample_scan() {
567 assert(type == SAMPLE_SCAN);
568 return u.sample_scan;
569 }
570 const auto &sample_scan() const {
571 assert(type == SAMPLE_SCAN);
572 return u.sample_scan;
573 }
574 auto &index_scan() {
575 assert(type == INDEX_SCAN);
576 return u.index_scan;
577 }
578 const auto &index_scan() const {
579 assert(type == INDEX_SCAN);
580 return u.index_scan;
581 }
 582 auto &index_distance_scan() {
 583 assert(type == INDEX_DISTANCE_SCAN);
584 return u.index_distance_scan;
585 }
586 const auto &index_distance_scan() const {
587 assert(type == INDEX_DISTANCE_SCAN);
588 return u.index_distance_scan;
589 }
590 auto &ref() {
591 assert(type == REF);
592 return u.ref;
593 }
594 const auto &ref() const {
595 assert(type == REF);
596 return u.ref;
597 }
598 auto &ref_or_null() {
599 assert(type == REF_OR_NULL);
600 return u.ref_or_null;
601 }
602 const auto &ref_or_null() const {
603 assert(type == REF_OR_NULL);
604 return u.ref_or_null;
605 }
606 auto &eq_ref() {
607 assert(type == EQ_REF);
608 return u.eq_ref;
609 }
610 const auto &eq_ref() const {
611 assert(type == EQ_REF);
612 return u.eq_ref;
613 }
 614 auto &pushed_join_ref() {
 615 assert(type == PUSHED_JOIN_REF);
616 return u.pushed_join_ref;
617 }
618 const auto &pushed_join_ref() const {
619 assert(type == PUSHED_JOIN_REF);
620 return u.pushed_join_ref;
621 }
 622 auto &full_text_search() {
 623 assert(type == FULL_TEXT_SEARCH);
624 return u.full_text_search;
625 }
626 const auto &full_text_search() const {
627 assert(type == FULL_TEXT_SEARCH);
628 return u.full_text_search;
629 }
630 auto &const_table() {
631 assert(type == CONST_TABLE);
632 return u.const_table;
633 }
634 const auto &const_table() const {
635 assert(type == CONST_TABLE);
636 return u.const_table;
637 }
638 auto &mrr() {
639 assert(type == MRR);
640 return u.mrr;
641 }
642 const auto &mrr() const {
643 assert(type == MRR);
644 return u.mrr;
645 }
646 auto &follow_tail() {
647 assert(type == FOLLOW_TAIL);
648 return u.follow_tail;
649 }
650 const auto &follow_tail() const {
651 assert(type == FOLLOW_TAIL);
652 return u.follow_tail;
653 }
 654 auto &index_range_scan() {
 655 assert(type == INDEX_RANGE_SCAN);
656 return u.index_range_scan;
657 }
658 const auto &index_range_scan() const {
659 assert(type == INDEX_RANGE_SCAN);
660 return u.index_range_scan;
661 }
662 auto &index_merge() {
663 assert(type == INDEX_MERGE);
664 return u.index_merge;
665 }
666 const auto &index_merge() const {
667 assert(type == INDEX_MERGE);
668 return u.index_merge;
669 }
 670 auto &rowid_intersection() {
 671 assert(type == ROWID_INTERSECTION);
672 return u.rowid_intersection;
673 }
674 const auto &rowid_intersection() const {
675 assert(type == ROWID_INTERSECTION);
676 return u.rowid_intersection;
677 }
678 auto &rowid_union() {
679 assert(type == ROWID_UNION);
680 return u.rowid_union;
681 }
682 const auto &rowid_union() const {
683 assert(type == ROWID_UNION);
684 return u.rowid_union;
685 }
 686 auto &index_skip_scan() {
 687 assert(type == INDEX_SKIP_SCAN);
688 return u.index_skip_scan;
689 }
690 const auto &index_skip_scan() const {
691 assert(type == INDEX_SKIP_SCAN);
692 return u.index_skip_scan;
693 }
 694 auto &group_index_skip_scan() {
 695 assert(type == GROUP_INDEX_SKIP_SCAN);
696 return u.group_index_skip_scan;
697 }
698 const auto &group_index_skip_scan() const {
699 assert(type == GROUP_INDEX_SKIP_SCAN);
700 return u.group_index_skip_scan;
701 }
 702 auto &dynamic_index_range_scan() {
 703 assert(type == DYNAMIC_INDEX_RANGE_SCAN);
 704 return u.dynamic_index_range_scan;
705 }
706 const auto &dynamic_index_range_scan() const {
 707 assert(type == DYNAMIC_INDEX_RANGE_SCAN);
 708 return u.dynamic_index_range_scan;
709 }
 710 auto &materialized_table_function() {
 711 assert(type == MATERIALIZED_TABLE_FUNCTION);
 712 return u.materialized_table_function;
713 }
714 const auto &materialized_table_function() const {
 715 assert(type == MATERIALIZED_TABLE_FUNCTION);
 716 return u.materialized_table_function;
717 }
 718 auto &unqualified_count() {
 719 assert(type == UNQUALIFIED_COUNT);
720 return u.unqualified_count;
721 }
722 const auto &unqualified_count() const {
723 assert(type == UNQUALIFIED_COUNT);
724 return u.unqualified_count;
725 }
 726 auto &table_value_constructor() {
 727 assert(type == TABLE_VALUE_CONSTRUCTOR);
728 return u.table_value_constructor;
729 }
730 const auto &table_value_constructor() const {
731 assert(type == TABLE_VALUE_CONSTRUCTOR);
732 return u.table_value_constructor;
733 }
 734 auto &fake_single_row() {
 735 assert(type == FAKE_SINGLE_ROW);
736 return u.fake_single_row;
737 }
738 const auto &fake_single_row() const {
739 assert(type == FAKE_SINGLE_ROW);
740 return u.fake_single_row;
741 }
742 auto &zero_rows() {
743 assert(type == ZERO_ROWS);
744 return u.zero_rows;
745 }
746 const auto &zero_rows() const {
747 assert(type == ZERO_ROWS);
748 return u.zero_rows;
749 }
 750 auto &zero_rows_aggregated() {
 751 assert(type == ZERO_ROWS_AGGREGATED);
752 return u.zero_rows_aggregated;
753 }
754 const auto &zero_rows_aggregated() const {
755 assert(type == ZERO_ROWS_AGGREGATED);
756 return u.zero_rows_aggregated;
757 }
758 auto &hash_join() {
759 assert(type == HASH_JOIN);
760 return u.hash_join;
761 }
762 const auto &hash_join() const {
763 assert(type == HASH_JOIN);
764 return u.hash_join;
765 }
766 auto &bka_join() {
767 assert(type == BKA_JOIN);
768 return u.bka_join;
769 }
770 const auto &bka_join() const {
771 assert(type == BKA_JOIN);
772 return u.bka_join;
773 }
 774 auto &nested_loop_join() {
 775 assert(type == NESTED_LOOP_JOIN);
776 return u.nested_loop_join;
777 }
778 const auto &nested_loop_join() const {
779 assert(type == NESTED_LOOP_JOIN);
780 return u.nested_loop_join;
781 }
 782 auto &nested_loop_semijoin_with_duplicate_removal() {
 783 assert(type == NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL);
 784 return u.nested_loop_semijoin_with_duplicate_removal;
785 }
 786 const auto &nested_loop_semijoin_with_duplicate_removal() const {
 787 assert(type == NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL);
 788 return u.nested_loop_semijoin_with_duplicate_removal;
789 }
790 auto &filter() {
791 assert(type == FILTER);
792 return u.filter;
793 }
794 const auto &filter() const {
795 assert(type == FILTER);
796 return u.filter;
797 }
798 auto &sort() {
799 assert(type == SORT);
800 return u.sort;
801 }
802 const auto &sort() const {
803 assert(type == SORT);
804 return u.sort;
805 }
806 auto &aggregate() {
807 assert(type == AGGREGATE);
808 return u.aggregate;
809 }
810 const auto &aggregate() const {
811 assert(type == AGGREGATE);
812 return u.aggregate;
813 }
 814 auto &temptable_aggregate() {
 815 assert(type == TEMPTABLE_AGGREGATE);
816 return u.temptable_aggregate;
817 }
818 const auto &temptable_aggregate() const {
819 assert(type == TEMPTABLE_AGGREGATE);
820 return u.temptable_aggregate;
821 }
822 auto &limit_offset() {
823 assert(type == LIMIT_OFFSET);
824 return u.limit_offset;
825 }
826 const auto &limit_offset() const {
827 assert(type == LIMIT_OFFSET);
828 return u.limit_offset;
829 }
830 auto &stream() {
831 assert(type == STREAM);
832 return u.stream;
833 }
834 const auto &stream() const {
835 assert(type == STREAM);
836 return u.stream;
837 }
838 auto &materialize() {
839 assert(type == MATERIALIZE);
840 return u.materialize;
841 }
842 const auto &materialize() const {
843 assert(type == MATERIALIZE);
844 return u.materialize;
845 }
 846 auto &materialize_information_schema_table() {
 847 assert(type == MATERIALIZE_INFORMATION_SCHEMA_TABLE);
 848 return u.materialize_information_schema_table;
849 }
 850 const auto &materialize_information_schema_table() const {
 851 assert(type == MATERIALIZE_INFORMATION_SCHEMA_TABLE);
 852 return u.materialize_information_schema_table;
853 }
854 auto &append() {
855 assert(type == APPEND);
856 return u.append;
857 }
858 const auto &append() const {
859 assert(type == APPEND);
860 return u.append;
861 }
862 auto &window() {
863 assert(type == WINDOW);
864 return u.window;
865 }
866 const auto &window() const {
867 assert(type == WINDOW);
868 return u.window;
869 }
870 auto &weedout() {
871 assert(type == WEEDOUT);
872 return u.weedout;
873 }
874 const auto &weedout() const {
875 assert(type == WEEDOUT);
876 return u.weedout;
877 }
 878 auto &remove_duplicates() {
 879 assert(type == REMOVE_DUPLICATES);
880 return u.remove_duplicates;
881 }
882 const auto &remove_duplicates() const {
883 assert(type == REMOVE_DUPLICATES);
884 return u.remove_duplicates;
885 }
 886 auto &remove_duplicates_on_index() {
 887 assert(type == REMOVE_DUPLICATES_ON_INDEX);
 888 return u.remove_duplicates_on_index;
889 }
890 const auto &remove_duplicates_on_index() const {
 891 assert(type == REMOVE_DUPLICATES_ON_INDEX);
 892 return u.remove_duplicates_on_index;
893 }
894 auto &alternative() {
895 assert(type == ALTERNATIVE);
896 return u.alternative;
897 }
898 const auto &alternative() const {
899 assert(type == ALTERNATIVE);
900 return u.alternative;
901 }
 902 auto &cache_invalidator() {
 903 assert(type == CACHE_INVALIDATOR);
904 return u.cache_invalidator;
905 }
906 const auto &cache_invalidator() const {
907 assert(type == CACHE_INVALIDATOR);
908 return u.cache_invalidator;
909 }
910 auto &delete_rows() {
911 assert(type == DELETE_ROWS);
912 return u.delete_rows;
913 }
914 const auto &delete_rows() const {
915 assert(type == DELETE_ROWS);
916 return u.delete_rows;
917 }
918 auto &update_rows() {
919 assert(type == UPDATE_ROWS);
920 return u.update_rows;
921 }
922 const auto &update_rows() const {
923 assert(type == UPDATE_ROWS);
924 return u.update_rows;
925 }
926
927 double num_output_rows() const { return m_num_output_rows; }
928
929 void set_num_output_rows(double val) {
930 assert(std::isfinite(val));
931 assert(val == kUnknownRowCount || val >= 0.0);
932 m_num_output_rows = val;
933 }
934
935 private:
936 /// Expected number of output rows.
 937 double m_num_output_rows{kUnknownRowCount};
 938
939 /// Expected cost to read all of this access path once.
 940 double m_cost{kUnknownCost};
 941
942 /// Expected cost to initialize this access path; ie., cost to read
943 /// k out of N rows would be init_cost + (k/N) * (cost - init_cost).
 944 /// Note that EXPLAIN prints out the cost of reading the _first_ row
945 /// because it is easier for the user and also easier to measure in
946 /// EXPLAIN ANALYZE, but it is easier to do calculations with a pure
947 /// initialization cost, so that is what we use in this member.
948 /// kUnknownCost for unknown.
 949 double m_init_cost{kUnknownCost};
 950
951 /// Of init_cost, how much of the initialization needs only to be done
952 /// once per query block. (This is a cost, not a proportion.)
 953 /// Ie., if the access path can reuse some of its initialization work
954 /// if Init() is called multiple times, this member will be nonzero.
955 /// A typical example is a materialized table with rematerialize=false;
956 /// the second time Init() is called, it's a no-op. Most paths will have
957 /// init_once_cost = 0.0, ie., repeated scans will cost the same.
958 /// We do not intend to use this field to model cache effects.
959 ///
960 /// This is currently not printed in EXPLAIN, only optimizer trace.
961 double m_init_once_cost{0.0};
962
963 /// If no filter, identical to cost. init_cost is always the same
964 /// (filters have zero initialization cost).
 965 double m_cost_before_filter{kUnknownCost};
 966
967 // We'd prefer if this could be an std::variant, but we don't have C++17 yet.
968 // It is private to force all access to be through the type-checking
969 // accessors.
970 //
971 // For information about the meaning of each value, see the corresponding
972 // row iterator constructors.
973 union {
974 struct {
977 struct {
978 TABLE *table;
982 struct {
983 TABLE *table;
984 int idx;
988 struct {
989 TABLE *table;
990 int idx;
992 bool reverse;
994 struct {
995 TABLE *table;
997 bool use_order;
998 bool reverse;
1000 struct {
1001 TABLE *table;
1003 bool use_order;
1005 struct {
1006 TABLE *table;
1009 struct {
1010 TABLE *table;
1012 bool use_order;
1015 struct {
1016 TABLE *table;
1018 bool use_order;
1022 struct {
1023 TABLE *table;
1026 struct {
1027 TABLE *table;
1033 struct {
1034 TABLE *table;
1036 struct {
1037 // The key part(s) we are scanning on. Note that this may be an array.
1038 // You can get the table we are working on by looking into
1039 // used_key_parts[0].field->table (it is not stored directly, to avoid
1040 // going over the AccessPath size limits).
1042
1043 // The actual ranges we are scanning over (originally derived from “key”).
1044 // Not a Bounds_checked_array, to save 4 bytes on the length.
1046 unsigned num_ranges;
1047
1048 unsigned mrr_flags;
1050
1051 // Which index (in the TABLE) we are scanning over, and how many of its
1052 // key parts we are using.
1053 unsigned index;
1055
1056 // If true, the scan can return rows in rowid order.
1058
1059 // If true, the scan _should_ return rows in rowid order.
1060 // Should only be set if can_be_used_for_ror == true.
1062
1063 // If true, this plan can be used for index merge scan.
1065
1066 // See row intersection for more details.
1068
1069 // Whether we are scanning over a geometry key part.
1070 bool geometry : 1;
1071
1072 // Whether we need a reverse scan. Only supported if geometry == false.
1073 bool reverse : 1;
1074
 1075 // For a reverse scan, whether we are using extended key parts. This is
 1076 // needed to set correct flags when retrieving records.
1079 struct {
1080 TABLE *table;
1085 struct {
1086 TABLE *table;
1088
1089 // Clustered primary key scan, if any.
1091
1092 bool forced_by_hint;
1095
1096 // If true, the first child scan should reuse table->file instead of
1097 // creating its own. This is true if the intersection is the topmost
1098 // range scan, but _not_ if it's below a union. (The reasons for this
1099 // are unknown.) It can also be negated by logic involving
1100 // retrieve_full_rows and is_covering, again for unknown reasons.
1101 //
1102 // This is not only for performance; multi-table delete has a hidden
1103 // dependency on this behavior when running against certain types of
1104 // tables (e.g. MyISAM), as it assumes table->file is correctly positioned
1105 // when deleting (and not all table types can transfer the position of one
1106 // handler to another by using position()).
1107 bool reuse_handler;
1108
1109 // true if no row retrieval phase is necessary.
1112 struct {
1113 TABLE *table;
1115 bool forced_by_hint;
1117 struct {
1118 TABLE *table;
1119 unsigned index;
1120 unsigned num_used_key_parts;
1121 bool forced_by_hint;
1122
1123 // Large, and has nontrivial destructors, so split out into
1124 // its own allocation.
1127 struct {
1128 TABLE *table;
1129 unsigned index;
1130 unsigned num_used_key_parts;
1131 bool forced_by_hint;
1132
1133 // Large, so split out into its own allocation.
1136 struct {
1137 TABLE *table;
1138 QEP_TAB *qep_tab; // Used only for buffering.
1140 struct {
1141 TABLE *table;
1145 struct {
1147
1148 struct {
1151 struct {
1152 // No members.
1154 struct {
1155 // The child is optional. It is only used for keeping track of which
1156 // tables are pruned away by this path, and it is only needed when this
1157 // path is on the inner side of an outer join. See ZeroRowsIterator for
1158 // details. The child of a ZERO_ROWS access path will not be visited by
1159 // WalkAccessPaths(). It will be visited by WalkTablesUnderAccessPath()
1160 // only if called with include_pruned_tables = true. No iterator is
1161 // created for the child, and the child is not shown by EXPLAIN.
1163 // Used for EXPLAIN only.
1164 // TODO(sgunders): make an enum.
1165 const char *cause;
1167 struct {
1168 // Used for EXPLAIN only.
1169 // TODO(sgunders): make an enum.
1170 const char *cause;
1172
1173 struct {
1177 bool store_rowids; // Whether we are below a weedout or not.
1181 struct {
1186 bool store_rowids; // Whether we are below a weedout or not.
1189 struct {
1191 JoinType join_type; // Somewhat redundant wrt. join_predicate.
1195
1196 // Equijoin filters to apply before the join, if any.
1197 // Indexes into join_predicate->expr->equijoin_conditions.
1198 // Non-equijoin conditions are always applied.
1199 // If already_expanded_predicates is true, do not re-expand.
1201
1202 // NOTE: Due to the nontrivial constructor on equijoin_predicates,
1203 // this struct needs an initializer, or the union would not be
1204 // default-constructible. If we need more than one union member
1205 // with such an initializer, we would probably need to change
1206 // equijoin_predicates into a uint64_t type-punned to an OverflowBitset.
1207 } nested_loop_join = {nullptr, nullptr, JoinType::INNER, false, false,
1208 nullptr, {}};
1209 struct {
1211 const TABLE *table;
1213 size_t key_len;
1215
1216 struct {
1219
 1220 // This parameter, unlike nearly all others, is not passed to the
1221 // actual iterator. Instead, if true, it signifies that when creating
1222 // the iterator, all materializable subqueries in “condition” should be
1223 // materialized (with any in2exists condition removed first). In the
1224 // very rare case that there are two or more such subqueries, this is
1225 // an all-or-nothing decision, for simplicity.
1226 //
1227 // See FinalizeMaterializedSubqueries().
1230 struct {
1234
1235 // If filesort is nullptr: A new filesort will be created at the
1236 // end of optimization, using this order and flags. Otherwise: Only
1237 // used by EXPLAIN.
1244 struct {
1248 struct {
1252 TABLE *table;
1256 struct {
1258 ha_rows limit;
1262 // Only used when the LIMIT is on a UNION with SQL_CALC_FOUND_ROWS.
1263 // See Query_expression::send_records.
1266 struct {
1268 JOIN *join;
1270 TABLE *table;
1272 int ref_slice;
1274 struct {
1275 // NOTE: The only legal access paths within table_path are
1276 // TABLE_SCAN, REF, REF_OR_NULL, EQ_REF, ALTERNATIVE,
1277 // CONST_TABLE (somewhat nonsensical), INDEX_SCAN and DYNAMIC_INDEX_SCAN
1279
1280 // Large, and has nontrivial destructors, so split out
1281 // into its own allocation.
1283 /** The total cost of executing the queries that we materialize.*/
1285 /// The number of materialized rows (as opposed to the number of rows
1286 /// fetched by table_path). Needed for 'explain'.
1289 struct {
1292 Item *condition;
1294 struct {
1297 struct {
1302 int ref_slice;
1305 struct {
1310 struct {
1311 using ItemSpan = std::span<Item *>;
1313
1314 /// @cond IGNORE
 1315 // These functions somehow trigger a doxygen warning. (Presumably
1316 // a doxygen bug.)
1317 ItemSpan &group_items() {
1318 return reinterpret_cast<ItemSpan &>(m_group_items);
1319 }
1320
1321 const ItemSpan &group_items() const {
1322 return reinterpret_cast<const ItemSpan &>(m_group_items);
1323 }
1324 /// @endcond
1325
1326 private:
1327 // gcc 11 does not support a span as a union member. Replace this
1328 // with "std::span<Item *> group_items" when we move to newer gcc version.
1329 alignas(alignof(ItemSpan)) std::byte m_group_items[sizeof(ItemSpan)];
1331 struct {
1333 TABLE *table;
1334 KEY *key;
1337 struct {
1339
1340 // For the ref.
1344 struct {
1346 const char *name;
1348 struct {
1353 struct {
1358 } u;
1359};
1361 "AccessPath must be trivially destructible, as it is allocated "
1362 "on the MEM_ROOT and not wrapped in unique_ptr_destroy_only"
1363 "(because multiple candidates during planning could point to "
1364 "the same access paths, and refcounting would be expensive)");
1365static_assert(sizeof(AccessPath) <= 152,
1366 "We are creating a lot of access paths in the join "
1367 "optimizer, so be sure not to bloat it without noticing. "
1368 "(104 bytes for the base, 52 bytes for the variant.)");
1369
1370inline void CopyBasicProperties(const AccessPath &from, AccessPath *to) {
1372 to->set_cost(from.cost());
1373 to->set_init_cost(from.init_cost());
1376 to->safe_for_rowid = from.safe_for_rowid;
1377 to->ordering_state = from.ordering_state;
1379 to->signature = from.signature;
1380}
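// Illustrative usage (not part of the original header): wrapper paths that
// keep their child's estimates can copy the shared base fields instead of
// recomputing them, as NewLimitOffsetAccessPath() below does (abridged):
//
//   AccessPath *path = new (thd->mem_root) AccessPath;
//   path->type = AccessPath::LIMIT_OFFSET;
//   path->limit_offset().child = child;
//   CopyBasicProperties(*child, path);  // rows, costs, ordering state, ...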
1381
1382/// Return the name of an AccessPath::Type enumerator.
1383std::string_view AccessPathTypeName(AccessPath::Type type);
1384
1385// Trivial factory functions for all of the types of access paths above.
1386
1387inline AccessPath *NewTableScanAccessPath(THD *thd, TABLE *table,
 1388 bool count_examined_rows) {
 1389 AccessPath *path = new (thd->mem_root) AccessPath;
 1390 path->type = AccessPath::TABLE_SCAN;
 1391 path->count_examined_rows = count_examined_rows;
1392 path->table_scan().table = table;
1393 return path;
1394}
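// Illustrative sketch (not part of the original header): composing a small
// plan from these factories. "thd", "t1" and "where_cond" are assumed to be
// provided by the surrounding optimizer context.
//
//   AccessPath *scan = NewTableScanAccessPath(thd, t1,
//                                             /*count_examined_rows=*/true);
//   AccessPath *filtered = NewFilterAccessPath(thd, scan, where_cond);
//   AccessPath *plan = NewLimitOffsetAccessPath(
//       thd, filtered, /*limit=*/10, /*offset=*/0, /*count_all_rows=*/false,
//       /*reject_multiple_rows=*/false, /*send_records_override=*/nullptr);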
1395
1396inline AccessPath *NewSampleScanAccessPath(THD *thd, TABLE *table,
 1397 double sampling_percentage,
 1398 bool count_examined_rows) {
 1399 AccessPath *path = new (thd->mem_root) AccessPath;
 1400 path->type = AccessPath::SAMPLE_SCAN;
 1401 path->count_examined_rows = count_examined_rows;
1402 path->sample_scan().table = table;
1403 path->sample_scan().sampling_percentage = sampling_percentage;
1404 return path;
1405}
1406
1407inline AccessPath *NewIndexScanAccessPath(THD *thd, TABLE *table, int idx,
 1408 bool use_order, bool reverse,
 1409 bool count_examined_rows) {
 1410 AccessPath *path = new (thd->mem_root) AccessPath;
 1411 path->type = AccessPath::INDEX_SCAN;
 1412 path->count_examined_rows = count_examined_rows;
1413 path->index_scan().table = table;
1414 path->index_scan().idx = idx;
1415 path->index_scan().use_order = use_order;
1416 path->index_scan().reverse = reverse;
1417 return path;
1418}
1419
1420inline AccessPath *NewRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref,
 1421 bool use_order, bool reverse,
1422 bool count_examined_rows) {
1423 AccessPath *path = new (thd->mem_root) AccessPath;
1424 path->type = AccessPath::REF;
1425 path->count_examined_rows = count_examined_rows;
1426 path->ref().table = table;
1427 path->ref().ref = ref;
1428 path->ref().use_order = use_order;
1429 path->ref().reverse = reverse;
1430 return path;
1431}
1432
1433inline AccessPath *NewRefOrNullAccessPath(THD *thd, TABLE *table,
 1434 Index_lookup *ref, bool use_order,
 1435 bool count_examined_rows) {
 1436 AccessPath *path = new (thd->mem_root) AccessPath;
 1437 path->type = AccessPath::REF_OR_NULL;
 1438 path->count_examined_rows = count_examined_rows;
1439 path->ref_or_null().table = table;
1440 path->ref_or_null().ref = ref;
1441 path->ref_or_null().use_order = use_order;
1442 return path;
1443}
1444
1445inline AccessPath *NewEQRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref,
 1446 bool count_examined_rows) {
1447 AccessPath *path = new (thd->mem_root) AccessPath;
1448 path->type = AccessPath::EQ_REF;
1449 path->count_examined_rows = count_examined_rows;
1450 path->eq_ref().table = table;
1451 path->eq_ref().ref = ref;
1452 return path;
1453}
1454
1455inline AccessPath *NewPushedJoinRefAccessPath(THD *thd, TABLE *table,
 1456 Index_lookup *ref, bool use_order,
 1457 bool is_unique,
 1458 bool count_examined_rows) {
 1459 AccessPath *path = new (thd->mem_root) AccessPath;
 1460 path->type = AccessPath::PUSHED_JOIN_REF;
 1461 path->count_examined_rows = count_examined_rows;
1462 path->pushed_join_ref().table = table;
1463 path->pushed_join_ref().ref = ref;
1464 path->pushed_join_ref().use_order = use_order;
1465 path->pushed_join_ref().is_unique = is_unique;
1466 return path;
1467}
1468
1469inline AccessPath *NewFullTextSearchAccessPath(THD *thd, TABLE *table,
 1470 Index_lookup *ref,
 1471 Item_func_match *ft_func,
 1472 bool use_order, bool use_limit,
 1473 bool count_examined_rows) {
 1474 AccessPath *path = new (thd->mem_root) AccessPath;
 1475 path->type = AccessPath::FULL_TEXT_SEARCH;
 1476 path->count_examined_rows = count_examined_rows;
1477 path->full_text_search().table = table;
1478 path->full_text_search().ref = ref;
1479 path->full_text_search().use_order = use_order;
1480 path->full_text_search().use_limit = use_limit;
1481 path->full_text_search().ft_func = ft_func;
1482 return path;
1483}
1484
1485inline AccessPath *NewConstTableAccessPath(THD *thd, TABLE *table,
 1486 Index_lookup *ref,
 1487 bool count_examined_rows) {
 1488 AccessPath *path = new (thd->mem_root) AccessPath;
 1489 path->type = AccessPath::CONST_TABLE;
 1490 path->count_examined_rows = count_examined_rows;
1491 path->set_num_output_rows(1.0);
1492 path->set_cost(0.0);
1493 path->set_init_cost(0.0);
1494 path->set_init_once_cost(0.0);
1495 path->const_table().table = table;
1496 path->const_table().ref = ref;
1497 return path;
1498}
1499
1500inline AccessPath *NewMRRAccessPath(THD *thd, TABLE *table, Index_lookup *ref,
 1501 int mrr_flags) {
1502 AccessPath *path = new (thd->mem_root) AccessPath;
1503 path->type = AccessPath::MRR;
1504 path->mrr().table = table;
1505 path->mrr().ref = ref;
1506 path->mrr().mrr_flags = mrr_flags;
1507
1508 // This will be filled in when the BKA iterator is created.
1509 path->mrr().bka_path = nullptr;
1510
1511 return path;
1512}
1513
1514inline AccessPath *NewFollowTailAccessPath(THD *thd, TABLE *table,
 1515 bool count_examined_rows) {
 1516 AccessPath *path = new (thd->mem_root) AccessPath;
 1517 path->type = AccessPath::FOLLOW_TAIL;
 1518 path->count_examined_rows = count_examined_rows;
1519 path->follow_tail().table = table;
1520 return path;
1521}
1522
1523inline AccessPath *NewDynamicIndexRangeScanAccessPath(
 1524 THD *thd, TABLE *table, QEP_TAB *qep_tab, bool count_examined_rows) {
 1525 AccessPath *path = new (thd->mem_root) AccessPath;
 1526 path->type = AccessPath::DYNAMIC_INDEX_RANGE_SCAN;
 1527 path->count_examined_rows = count_examined_rows;
1528 path->dynamic_index_range_scan().table = table;
1529 path->dynamic_index_range_scan().qep_tab = qep_tab;
1530 return path;
1531}
1532
1533inline AccessPath *NewMaterializedTableFunctionAccessPath(
 1534 THD *thd, TABLE *table, Table_function *table_function,
 1535 AccessPath *table_path) {
 1536 AccessPath *path = new (thd->mem_root) AccessPath;
 1537 path->type = AccessPath::MATERIALIZED_TABLE_FUNCTION;
 1538 path->materialized_table_function().table = table;
1539 path->materialized_table_function().table_function = table_function;
1540 path->materialized_table_function().table_path = table_path;
1541 return path;
1542}
1543
1544inline AccessPath *NewUnqualifiedCountAccessPath(THD *thd) {
 1545 AccessPath *path = new (thd->mem_root) AccessPath;
 1546 path->type = AccessPath::UNQUALIFIED_COUNT;
 1547 return path;
1548}
1549
1550AccessPath *NewTableValueConstructorAccessPath(const THD *thd,
 1551 const JOIN *join);
1552
1553inline AccessPath *NewNestedLoopSemiJoinWithDuplicateRemovalAccessPath(
 1554 THD *thd, AccessPath *outer, AccessPath *inner, const TABLE *table,
1555 KEY *key, size_t key_len) {
1556 AccessPath *path = new (thd->mem_root) AccessPath;
 1557 path->type = AccessPath::NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL;
 1558 path->nested_loop_semijoin_with_duplicate_removal().outer = outer;
1559 path->nested_loop_semijoin_with_duplicate_removal().inner = inner;
1560 path->nested_loop_semijoin_with_duplicate_removal().table = table;
1561 path->nested_loop_semijoin_with_duplicate_removal().key = key;
1562 path->nested_loop_semijoin_with_duplicate_removal().key_len = key_len;
 1563 path->has_group_skip_scan =
 1564 outer->has_group_skip_scan || inner->has_group_skip_scan;
 1565 return path;
1566}
1567
1568inline AccessPath *NewFilterAccessPath(THD *thd, AccessPath *child,
 1569 Item *condition) {
1570 AccessPath *path = new (thd->mem_root) AccessPath;
1571 path->type = AccessPath::FILTER;
1572 path->filter().child = child;
1573 path->filter().condition = condition;
1574 path->filter().materialize_subqueries = false;
1575 path->has_group_skip_scan = child->has_group_skip_scan;
1576 return path;
1577}
1578
1579// Not inline, because it needs access to filesort internals
1580// (which are forward-declared in this file).
1581AccessPath *NewSortAccessPath(THD *thd, AccessPath *child, Filesort *filesort,
 1582 ORDER *order, bool count_examined_rows);
1583
1584inline AccessPath *NewAggregateAccessPath(THD *thd, AccessPath *child,
 1585 olap_type olap) {
 1586 AccessPath *path = new (thd->mem_root) AccessPath;
 1587 path->type = AccessPath::AGGREGATE;
 1588 path->aggregate().child = child;
1589 path->aggregate().olap = olap;
1590 path->has_group_skip_scan = child->has_group_skip_scan;
1591 return path;
1592}
1593
1594inline AccessPath *NewTemptableAggregateAccessPath(
 1595 THD *thd, AccessPath *subquery_path, JOIN *join,
1596 Temp_table_param *temp_table_param, TABLE *table, AccessPath *table_path,
1597 int ref_slice) {
1598 AccessPath *path = new (thd->mem_root) AccessPath;
 1599 path->type = AccessPath::TEMPTABLE_AGGREGATE;
 1600 path->temptable_aggregate().subquery_path = subquery_path;
1601 path->temptable_aggregate().join = join;
1602 path->temptable_aggregate().temp_table_param = temp_table_param;
1603 path->temptable_aggregate().table = table;
1604 path->temptable_aggregate().table_path = table_path;
1605 path->temptable_aggregate().ref_slice = ref_slice;
1606 return path;
1607}
1608
1609inline AccessPath *NewLimitOffsetAccessPath(THD *thd, AccessPath *child,
 1610 ha_rows limit, ha_rows offset,
1611 bool count_all_rows,
1612 bool reject_multiple_rows,
1613 ha_rows *send_records_override) {
1615 AccessPath *path = new (thd->mem_root) AccessPath;
 1616 path->type = AccessPath::LIMIT_OFFSET;
 1617 path->immediate_update_delete_table = child->immediate_update_delete_table;
1618 path->limit_offset().child = child;
1619 path->limit_offset().limit = limit;
1620 path->limit_offset().offset = offset;
1621 path->limit_offset().count_all_rows = count_all_rows;
1622 path->limit_offset().reject_multiple_rows = reject_multiple_rows;
1623 path->limit_offset().send_records_override = send_records_override;
1624 CopyBasicProperties(*child, path);
1626 return path;
1627}
1628
1629inline AccessPath *NewFakeSingleRowAccessPath(THD *thd,
 1630 bool count_examined_rows) {
 1631 AccessPath *path = new (thd->mem_root) AccessPath;
 1632 path->type = AccessPath::FAKE_SINGLE_ROW;
 1633 path->count_examined_rows = count_examined_rows;
1634 path->set_num_output_rows(1.0);
1635 path->set_cost(0.0);
1636 path->set_init_cost(0.0);
1637 path->set_init_once_cost(0.0);
1638 return path;
1639}
1640
1641inline AccessPath *NewZeroRowsAccessPath(THD *thd, AccessPath *child,
 1642 const char *cause) {
 1643 AccessPath *path = new (thd->mem_root) AccessPath;
 1644 path->type = AccessPath::ZERO_ROWS;
 1645 path->zero_rows().child = child;
1646 path->zero_rows().cause = cause;
1647 path->set_num_output_rows(0.0);
1648 path->set_cost(0.0);
1649 path->set_init_cost(0.0);
1650 path->set_init_once_cost(0.0);
1651 path->num_output_rows_before_filter = 0.0;
1652 path->set_cost_before_filter(0.0);
1653 return path;
1654}
1655
1656inline AccessPath *NewZeroRowsAccessPath(THD *thd, const char *cause) {
1657 return NewZeroRowsAccessPath(thd, /*child=*/nullptr, cause);
1658}
1659
1660inline AccessPath *NewZeroRowsAggregatedAccessPath(THD *thd,
 1661 const char *cause) {
 1662 AccessPath *path = new (thd->mem_root) AccessPath;
 1663 path->type = AccessPath::ZERO_ROWS_AGGREGATED;
 1664 path->zero_rows_aggregated().cause = cause;
1665 path->set_num_output_rows(1.0);
1666 path->set_cost(0.0);
1667 path->set_init_cost(0.0);
1668 return path;
1669}
1670
1671AccessPath *NewStreamingAccessPath(THD *thd, AccessPath *child, JOIN *join,
 1672 Temp_table_param *temp_table_param,
1673 TABLE *table, int ref_slice);
1674
1675inline Mem_root_array<MaterializePathParameters::Operand>
1676SingleMaterializeQueryBlock(THD *thd, AccessPath *path, int select_number,
 1677 JOIN *join, bool copy_items,
1678 Temp_table_param *temp_table_param) {
1679 assert(path != nullptr);
 1680 Mem_root_array<MaterializePathParameters::Operand> array(thd->mem_root, 1);
 1681 MaterializePathParameters::Operand &operand = array[0];
1682 operand.subquery_path = path;
1683 operand.select_number = select_number;
1684 operand.join = join;
1686 operand.copy_items = copy_items;
1687 operand.temp_table_param = temp_table_param;
1688 return array;
1689}
1690
1694 AccessPath *table_path, Common_table_expr *cte, Query_expression *unit,
1695 int ref_slice, bool rematerialize, ha_rows limit_rows,
1696 bool reject_multiple_rows,
1701 param->m_operands = std::move(operands);
1702 if (rematerialize) {
1703 // There's no point in adding invalidators if we're rematerializing
1704 // every time anyway.
1705 param->invalidators = nullptr;
1706 } else {
1707 param->invalidators = invalidators;
1708 }
1709 param->table = table;
1710 param->cte = cte;
1711 param->unit = unit;
1712 param->ref_slice = ref_slice;
1713 param->rematerialize = rematerialize;
1714 param->limit_rows = (table == nullptr || table->is_union_or_table()
1715 ? limit_rows
1716 :
1717 // INTERSECT, EXCEPT: Enforced by TableScanIterator,
1718 // see its constructor
1719 HA_POS_ERROR);
1720 param->reject_multiple_rows = reject_multiple_rows;
1721 param->deduplication_reason = dedup_reason;
1722
1723#ifndef NDEBUG
1724 for (MaterializePathParameters::Operand &operand : param->m_operands) {
1725 assert(operand.subquery_path != nullptr);
1726 }
1727#endif
1728
1729 AccessPath *path = new (thd->mem_root) AccessPath;
 1730 path->type = AccessPath::MATERIALIZE;
 1731 path->materialize().table_path = table_path;
1732 path->materialize().param = param;
1733 path->materialize().subquery_cost = kUnknownCost;
1734 path->materialize().subquery_rows = kUnknownRowCount;
1735 if (rematerialize) {
1736 path->safe_for_rowid = AccessPath::SAFE_IF_SCANNED_ONCE;
1737 } else {
1738 // The default; this is just to be explicit in the code.
1739 path->safe_for_rowid = AccessPath::SAFE;
1740 }
1741 return path;
1742}
1743
1744inline AccessPath *NewMaterializeInformationSchemaTableAccessPath(
 1745 THD *thd, AccessPath *table_path, Table_ref *table_list, Item *condition) {
 1746 AccessPath *path = new (thd->mem_root) AccessPath;
 1747 path->type = AccessPath::MATERIALIZE_INFORMATION_SCHEMA_TABLE;
 1748 path->materialize_information_schema_table().table_path = table_path;
1749 path->materialize_information_schema_table().table_list = table_list;
1750 path->materialize_information_schema_table().condition = condition;
1751 return path;
1752}
1753
1754/// Add path costs c1 and c2, but handle kUnknownCost correctly.
1755inline double AddCost(double c1, double c2) {
1756 // If one is undefined, use the other, as we have nothing else.
1757 if (c1 == kUnknownCost) {
1758 return c2;
1759 } else if (c2 == kUnknownCost) {
1760 return c1;
1761 } else {
1762 return c1 + c2;
1763 }
1764}
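// Worked example (illustrative, not part of the original header):
// AddCost(10.0, 20.0) == 30.0, but AddCost(kUnknownCost, 20.0) == 20.0.
// Since kUnknownCost is a large negative sentinel, naively adding it would
// grossly distort the total, which is exactly what this helper guards
// against.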
1765
1766/// Add row counts c1 and c2, but handle kUnknownRowCount correctly.
1767inline double AddRowCount(double c1, double c2) {
1768 // If one is undefined, use the other, as we have nothing else.
1769 if (c1 == kUnknownRowCount) {
1770 return c2;
1771 } else if (c2 == kUnknownRowCount) {
1772 return c1;
1773 } else {
1774 return c1 + c2;
1775 }
1776}
1777
1778// The Mem_root_array must be allocated on a MEM_ROOT that lives for at least
1779// as long as the access path.
1780inline AccessPath *NewAppendAccessPath(
 1781 THD *thd, Mem_root_array<AppendPathParameters> *children) {
1782 AccessPath *path = new (thd->mem_root) AccessPath;
1783 path->type = AccessPath::APPEND;
1784 path->append().children = children;
1785 double num_output_rows = kUnknownRowCount;
1786 for (const AppendPathParameters &child : *children) {
1787 path->set_cost(AddCost(path->cost(), child.path->cost()));
1788 path->set_init_cost(AddCost(path->init_cost(), child.path->init_cost()));
1789 path->set_init_once_cost(path->init_once_cost() +
1790 child.path->init_once_cost());
1791 num_output_rows =
1792 AddRowCount(num_output_rows, child.path->num_output_rows());
1793 }
1794 path->set_num_output_rows(num_output_rows);
1795 return path;
1796}
1797
1798inline AccessPath *NewWindowAccessPath(THD *thd, AccessPath *child,
 1799 Window *window,
1800 Temp_table_param *temp_table_param,
1801 int ref_slice, bool needs_buffering) {
1802 AccessPath *path = new (thd->mem_root) AccessPath;
1803 path->type = AccessPath::WINDOW;
1804 path->window().child = child;
1805 path->window().window = window;
1806 path->window().temp_table = nullptr;
1807 path->window().temp_table_param = temp_table_param;
1808 path->window().ref_slice = ref_slice;
1809 path->window().needs_buffering = needs_buffering;
1810 path->set_num_output_rows(child->num_output_rows());
1811 return path;
1812}
1813
1814inline AccessPath *NewWeedoutAccessPath(THD *thd, AccessPath *child,
 1815 SJ_TMP_TABLE *weedout_table) {
1816 AccessPath *path = new (thd->mem_root) AccessPath;
1817 path->type = AccessPath::WEEDOUT;
1818 path->weedout().child = child;
1819 path->weedout().weedout_table = weedout_table;
1820 path->weedout().tables_to_get_rowid_for =
1821 0; // Must be handled by the caller.
1822 return path;
1823}
1824
1825inline AccessPath *NewRemoveDuplicatesAccessPath(
 1826 THD *thd, AccessPath *child, std::span<Item *> group_items) {
 1827 AccessPath *path = new (thd->mem_root) AccessPath;
 1828 path->type = AccessPath::REMOVE_DUPLICATES;
 1829 path->remove_duplicates().child = child;
1830 path->remove_duplicates().group_items() = group_items;
1831 path->has_group_skip_scan = child->has_group_skip_scan;
1832 return path;
1833}
1834
1835inline AccessPath *NewRemoveDuplicatesOnIndexAccessPath(
 1836 THD *thd, AccessPath *child, TABLE *table, KEY *key,
1837 unsigned loosescan_key_len) {
1838 AccessPath *path = new (thd->mem_root) AccessPath;
 1839 path->type = AccessPath::REMOVE_DUPLICATES_ON_INDEX;
 1840 path->remove_duplicates_on_index().child = child;
1841 path->remove_duplicates_on_index().table = table;
1842 path->remove_duplicates_on_index().key = key;
1843 path->remove_duplicates_on_index().loosescan_key_len = loosescan_key_len;
1844 path->has_group_skip_scan = child->has_group_skip_scan;
1845 return path;
1846}
1847
1848inline AccessPath *NewAlternativeAccessPath(THD *thd, AccessPath *child,
 1849 AccessPath *table_scan_path,
1850 Index_lookup *used_ref) {
1851 AccessPath *path = new (thd->mem_root) AccessPath;
 1852 path->type = AccessPath::ALTERNATIVE;
 1853 path->alternative().table_scan_path = table_scan_path;
1854 path->alternative().child = child;
1855 path->alternative().used_ref = used_ref;
1856 return path;
1857}
1858
1859inline AccessPath *NewInvalidatorAccessPath(THD *thd, AccessPath *child,
 1860 const char *name) {
1861 AccessPath *path = new (thd->mem_root) AccessPath;
 1862 path->type = AccessPath::CACHE_INVALIDATOR;
 1863 path->cache_invalidator().child = child;
1864 path->cache_invalidator().name = name;
1865 return path;
1866}
1867
1868AccessPath *NewDeleteRowsAccessPath(THD *thd, AccessPath *child,
 1869 table_map delete_tables,
 1870 table_map immediate_tables);
1871
1872AccessPath *NewUpdateRowsAccessPath(THD *thd, AccessPath *child,
 1873 table_map update_tables,
1874 table_map immediate_tables);
1875
1876/**
1877 Modifies "path" and the paths below it so that they provide row IDs for
1878 all tables.
1879
1880 This also figures out how the row IDs should be retrieved for each table in
1881 the input to the path. If the handler of the table is positioned on the
1882 correct row while reading the input, handler::position() can be called to get
1883 the row ID from the handler. However, if the input iterator returns rows
1884 without keeping the position of the underlying handlers in sync, calling
1885 handler::position() will not be able to provide the row IDs. Specifically,
1886 hash join and BKA join do not keep the underlying handlers positioned on the
1887 right row. Therefore, this function will instruct every hash join or BKA join
1888 below "path" to maintain row IDs in the join buffer, and updating handler::ref
1889 in every input table for each row they return. Then "path" does not need to
1890 call handler::position() to get it (nor should it, since calling it would
1891 overwrite the correct row ID with a stale one).
1892
1893 The tables on which "path" should call handler::position() are stored in a
1894 `tables_to_get_rowid_for` bitset in "path". For all the other tables, it can
1895 assume that handler::ref already contains the correct row ID.
1896 */
1897void FindTablesToGetRowidFor(AccessPath *path);
1898
1899unique_ptr_destroy_only<RowIterator> CreateIteratorFromAccessPath(
 1900 THD *thd, MEM_ROOT *mem_root, AccessPath *path, JOIN *join,
 1901 bool eligible_for_batch_mode);
1902
1903// A short form of CreateIteratorFromAccessPath() that implicitly uses the THD's
1904// MEM_ROOT for storage, which is nearly always what you want. (The only caller
1905// that does anything else is DynamicRangeIterator.)
1907 THD *thd, AccessPath *path, JOIN *join, bool eligible_for_batch_mode) {
1908 return CreateIteratorFromAccessPath(thd, thd->mem_root, path, join,
1909 eligible_for_batch_mode);
1910}
1911
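// Usage sketch, illustrative only: instantiate and drive the iterator tree
// for a finished plan. "ExecutePlanSketch" is a hypothetical helper; "thd",
// "join" and "root_path" are assumed to be supplied by the caller.
inline bool ExecutePlanSketch(THD *thd, JOIN *join, AccessPath *root_path) {
  unique_ptr_destroy_only<RowIterator> iterator = CreateIteratorFromAccessPath(
      thd, root_path, join, /*eligible_for_batch_mode=*/true);
  if (iterator == nullptr || iterator->Init()) return true;  // Error.
  for (;;) {
    const int err = iterator->Read();
    if (err == -1) return false;  // EOF.
    if (err == 1) return true;    // Error.
    // err == 0: a row is staged in the tables' record buffers; consume it.
  }
}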
1912void SetCostOnTableAccessPath(const Cost_model_server &cost_model,
1913 const POSITION *pos, bool is_after_filter,
1914 AccessPath *path);
1915
1916/**
1917 Return the TABLE* referred from 'path' if it is a basic access path,
1918 else a nullptr is returned. Temporary tables, such as those used by
1919 sorting, aggregation, and subquery materialization, are not returned.
1920*/
1921TABLE *GetBasicTable(const AccessPath *path);
1922
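// Hypothetical usage, not from the header: probe whether "path" reads a
// single base table directly before inspecting per-table properties.
//
//   if (TABLE *table = GetBasicTable(path); table != nullptr) {
//     // Safe to look at table->file, table->key_info, etc.
//   }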
1923/**
1924 Applies the secondary storage engine nrows modification function, if any.
1925
1926 @param params Input parameters for the callback function.
1927 Refer to the typedef for an explanation of each input parameter.
1928
1929 @return true if the secondary engine has proposed a modification to the access path's nrows.
1930*/
1931bool ApplySecondaryEngineNrowsHook(
1932 const SecondaryEngineNrowsParameters &params);
1933
1934/**
1935 Returns whether the SecondaryNrows hook is applicable given the parameters.
1936
1937 @param path access path to be verified.
1938 @param thd current query thd.
1939 @param graph current query block hypergraph.
1940
1941 @return true if the nrows hook is applicable.
1942*/
1943bool IsSecondaryEngineNrowsHookApplicable(AccessPath *path, THD *thd,
1944 const JoinHypergraph *graph);
1945
1946/**
1947 Returns whether the SecondaryNrows hook is enabled and applicable given
1948 the parameters.
1949
1950 @param path access path to be verified.
1951 @param thd current query thd.
1952 @param graph current query block hypergraph.
1953
1954 @return true if the nrows hook is enabled and applicable.
1955*/
1956bool IsSecondaryNrowsHookEnabledAndApplicable(AccessPath *path, THD *thd,
1957 const JoinHypergraph *graph);
1958
1959/**
1960 Returns a map of all tables read when `path` or any of its children are
1961 executed. Only iterators that are part of the same query block as `path`
1962 are considered.
1963
1964 If a table is read that doesn't have a map, specifically the temporary
1965 tables made as part of materialization within the same query block,
1966 RAND_TABLE_BIT will be set as a convention and none of that access path's
1967 children will be included in the map. In this case, the caller will need to
1968 manually go in and find said access path, to ask it for its TABLE object.
1969
1970 If include_pruned_tables = true, tables that are hidden under a ZERO_ROWS
1971 access path (i.e., pruned away due to impossible join conditions) will be
1972 included in the map. This is normally what you want, as those tables need to
1973 be included whenever you store NULL flags and the like, but if you don't
1974 want them (perhaps to specifically check for conditions referring to pruned
1975 tables), you can set it to false.
1976 */
1977table_map GetUsedTableMap(const AccessPath *path, bool include_pruned_tables);
1978
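// Illustrative sketch (hypothetical helper, not part of this header): test
// whether a condition refers only to tables available at "path". A real
// caller may also need to mask pseudo-table bits such as RAND_TABLE_BIT
// out of used_tables().
inline bool ConditionIsCoveredByPathSketch(const Item *condition,
                                           const AccessPath *path) {
  const table_map available =
      GetUsedTableMap(path, /*include_pruned_tables=*/true);
  return (condition->used_tables() & ~available) == 0;
}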
1979/**
1980 Find the list of all tables used by this root, stopping at materializations.
1981 Used for knowing which tables to sort.
1982 */
1983Mem_root_array<TABLE *> CollectTables(THD *thd, AccessPath *root_path);
1984
1985/**
1986 For each access path in the (sub)tree rooted at “path”, expand any use of
1987 “filter_predicates” into newly-inserted FILTER access paths, using the given
1988 predicate list. This is used after finding an optimal set of access paths,
1989 to normalize the tree so that the remaining consumers do not need to worry
1990 about filter_predicates and cost_before_filter.
1991
1992 “join” is the join that “path” is part of.
1993 */
1994void ExpandFilterAccessPaths(THD *thd, const JoinHypergraph &graph,
1995 AccessPath *path, const JOIN *join);
1996
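// Typical call-site sketch, illustrative only: once an optimal plan rooted
// at "root_path" has been chosen, normalize it so later phases see explicit
// FILTER nodes instead of filter_predicates bitsets.
//
//   ExpandFilterAccessPaths(thd, graph, root_path, join);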
1997/**
1998 Extracts the Item expression from the given “filter_predicates” corresponding
1999 to the given “mask”.
2000 */
2001Item *ConditionFromFilterPredicates(const Mem_root_array<Predicate> &predicates,
2002 OverflowBitset mask,
2003 int num_where_predicates);
2004
2005/// Like ExpandFilterAccessPaths(), but expands only the single access path
2006/// at “path”.
2007void ExpandSingleFilterAccessPath(THD *thd, const JoinHypergraph &graph,
2008 AccessPath *path, const JOIN *join);
2009
2010/**
2011 Clear all the bits representing filter predicates in a bitset, and keep only
2012 the bits representing applied sargable join conditions.
2013
2014 See AccessPath::filter_predicates and
2015 AccessPath::applied_sargable_join_predicates() for details about how filter
2016 predicates and applied sargable join predicates are stored in different
2017 partitions of the same bitset.
2018
2019 @param predicates A bitset representing both filter predicates and applied
2020 sargable join predicates.
2021 @param num_where_predicates The number of filter predicates.
2022 @param mem_root The root on which to allocate memory, if needed.
2023
2024 @return A copy of "predicates" with only the bits for applied sargable join
2025 predicates set.
2026 */
2027MutableOverflowBitset ClearFilterPredicates(OverflowBitset predicates,
2028 int num_where_predicates,
2029 MEM_ROOT *mem_root);
2030
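// A sketch of the semantics only, not the actual implementation (assumes
// OverflowBitset's capacity() and the BitsSetIn() iteration helper from
// overflow_bitset.h): keep just the bits at or above num_where_predicates.
inline MutableOverflowBitset ClearFilterPredicatesSketch(
    OverflowBitset predicates, int num_where_predicates, MEM_ROOT *mem_root) {
  MutableOverflowBitset result{mem_root, predicates.capacity()};
  for (int bit : BitsSetIn(predicates)) {
    // Bits below num_where_predicates are filter predicates; drop them.
    if (bit >= num_where_predicates) result.SetBit(bit);
  }
  return result;
}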
2031/// Returns the tables that are part of a hash join.
2032table_map GetHashJoinTables(AccessPath *path);
2033
2034/**
2035 Get the conditions to put into the extra conditions of the HashJoinIterator.
2036 This includes the non-equijoin conditions, as well as any equijoin conditions
2037 on columns that are too big to include in the hash table. (The old optimizer
2038 handles equijoin conditions on long columns elsewhere, so the last part only
2039 applies to the hypergraph optimizer.)
2040
2041 @param mem_root The root on which to allocate memory, if needed.
2042 @param using_hypergraph_optimizer True if using the hypergraph optimizer.
2043 @param equijoin_conditions All the equijoin conditions of the join.
2044 @param other_conditions All the non-equijoin conditions of the join.
2045
2046 @return All the conditions to evaluate as "extra conditions" in
2047 HashJoinIterator, or nullptr on OOM.
2048 */
2049const Mem_root_array<Item *> *GetExtraHashJoinConditions(
2050 MEM_ROOT *mem_root, bool using_hypergraph_optimizer,
2051 const std::vector<HashJoinCondition> &equijoin_conditions,
2052 const Mem_root_array<Item *> &other_conditions);
2053
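// Illustrative call, not part of the header ("equijoin_conditions" and
// "other_conditions" are hypothetical variables): gather the residual
// conditions when setting up a HashJoinIterator, honoring the
// nullptr-on-OOM contract.
//
//   const Mem_root_array<Item *> *extra_conditions =
//       GetExtraHashJoinConditions(thd->mem_root,
//                                  /*using_hypergraph_optimizer=*/true,
//                                  equijoin_conditions, other_conditions);
//   if (extra_conditions == nullptr) return true;  // OOM.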
2054/**
2055 Update status variables which count how many scans of various types are used
2056 in a query plan.
2057
2058 The following status variables are updated: Select_scan, Select_full_join,
2059 Select_range, Select_full_range_join, Select_range_check. They are also stored
2060 as performance schema statement events with the same names.
2061
2062 In addition, the performance schema statement events NO_INDEX_USED and
2063 NO_GOOD_INDEX_USED are updated, if appropriate.
2064 */
2065void CollectStatusVariables(THD *thd, const JOIN *top_join,
2066 const AccessPath &top_path);
2067
2068#endif // SQL_JOIN_OPTIMIZER_ACCESS_PATH_H