MySQL 8.4.2
Source Code Documentation
access_path.h
Go to the documentation of this file.
1/* Copyright (c) 2020, 2024, Oracle and/or its affiliates.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License, version 2.0,
5 as published by the Free Software Foundation.
6
7 This program is designed to work with certain software (including
8 but not limited to OpenSSL) that is licensed under separate terms,
9 as designated in a particular file or component or in included license
10 documentation. The authors of MySQL hereby grant you an additional
11 permission to link the program and your derivative works with the
12 separately licensed software that they have either included with
13 the program or referenced in the documentation.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License, version 2.0, for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
23
24#ifndef SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
25#define SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
26
27#include <assert.h>
28#include <stdint.h>
29#include <cstddef>
30#include <type_traits>
31#include <utility>
32#include <vector>
33
34#include "my_alloc.h"
35#include "my_base.h"
36#include "my_table_map.h"
37#include "sql/item.h"
38// IWYU suggests removing row_iterator.h, but then the inlined short form of
39// CreateIteratorFromAccessPath() fails to compile. So use a pragma to keep it.
40#include "sql/iterators/row_iterator.h" // IWYU pragma: keep
45#include "sql/join_type.h"
46#include "sql/mem_root_array.h"
47#include "sql/olap.h"
48#include "sql/sql_class.h"
49#include "sql/table.h"
50
52class Filesort;
54class Item_func_match;
55class JOIN;
56class KEY;
58class QEP_TAB;
59class QUICK_RANGE;
60class SJ_TMP_TABLE;
61class Table_function;
63class Window;
64struct AccessPath;
67struct Index_lookup;
68struct KEY_PART;
69struct POSITION;
71
72/**
73 A specification that two specific relational expressions
74 (e.g., two tables, or a table and a join between two other tables)
75 should be joined together. The actual join conditions, if any,
76 live inside the “expr” object, as does the join type etc.
77 */
81
82 // If this join is made using a hash join, estimates the width
83 // of each row as stored in the hash table, in bytes.
85
86 // The set of (additional) functional dependencies that are active
87 // after this join predicate has been applied. E.g. if we're joining
88 // on t1.x = t2.x, there will be a bit for that functional dependency.
89 // We don't currently support more complex join conditions, but there's
90 // no conceptual reason why we couldn't, e.g. a join on a = b + c
91 // could give rise to the FD {b, c} → a and possibly even {a, b} → c
92 // or {a, c} → b.
93 //
94 // Used in the processing of interesting orders.
96
97 // A less compact form of functional_dependencies, used during building
98 // (FunctionalDependencySet bitmaps are only available after all functional
99 // indexes have been collected and Build() has been called).
101
102 // A semijoin on the following format:
103 //
104 // SELECT ... FROM t1 WHERE EXISTS
105 // (SELECT ... FROM t2 WHERE t1.f1=t2.f2 AND t1.f3=t2.f4 ... t1.fn=t2.fm)
106 //
107 // may be transformed into an equivalent inner join:
108 //
109 // SELECT ... FROM (SELECT DISTINCT f2, f4...fm FROM t2) d JOIN t1
110 // ON t1.f1=d.f2 AND t1.f3=d.f4 ... t1.fn=d.fm
111 //
 112 // If this is a suitable semijoin: This field will identify the
113 // grouping given by (f2, f4..fm). (@see
114 // LogicalOrderings::RemapOrderingIndex() for a description of how this
115 // value can be mapped to an actual ordering). The join
116 // optimizer will then consider deduplicating on it and applying the
117 // above transform. If no such grouping was found, this field will be -1.
119
120 // Same as ordering_idx_needed_for_semijoin_rewrite, but given to the
121 // RemoveDuplicatesIterator for doing the actual grouping. Allocated
122 // on the MEM_ROOT. Can be empty, in which case a LIMIT 1 would do.
123 Item **semijoin_group = nullptr;
125};
126
127/**
128 A filter of some sort that is not a join condition (those are stored
129 in JoinPredicate objects). AND conditions are typically split up into
130 multiple Predicates.
131 */
132struct Predicate {
134
135 // condition->used_tables(), converted to a NodeMap.
137
138 // tables referred to by the condition, plus any tables whose values
139 // can null any of those tables. (Even when reordering outer joins,
140 // at least one of those tables will still be present on the
141 // left-hand side of the outer join, so this is sufficient.)
142 //
143 // As a special case, we allow setting RAND_TABLE_BIT, even though it
144 // is normally part of a table_map, not a NodeMap.
146
148
149 // Whether this predicate is a join condition after all; it was promoted
150 // to a WHERE predicate since it was part of a cycle (see the comment in
151 // AddCycleEdges()). If it is, it is usually ignored so that we don't
152 // double-apply join conditions -- but if the join in question was not
153 // applied (because the cycle was broken at this point), the predicate
154 // would come into play. This is normally registered on the join itself
155 // (see RelationalExpression::join_predicate_bitmap), but having the bit
156 // on the predicate itself is used to avoid trying to push it down as a
157 // sargable predicate.
158 bool was_join_condition = false;
159
160 // If this is a join condition that came from a multiple equality,
161 // and we have decided to create a mesh from that multiple equality,
162 // returns the index of it into the “multiple_equalities” array
163 // in MakeJoinHypergraph(). (You don't actually need the array to
164 // use this; it's just an opaque index to deduplicate between different
165 // predicates.) Otherwise, -1.
167
168 // See the equivalent fields in JoinPredicate.
171
172 // The list of all subqueries referred to in this predicate, if any.
173 // The optimizer uses this to add their materialized/non-materialized
174 // costs when evaluating filters.
176};
177
181};
182
183/// To indicate that a row estimate is not yet made.
184constexpr double kUnknownRowCount = -1.0;
185
186/// To indicate that a cost estimate is not yet made. We use a large negative
187/// value to avoid getting a positive result if we by mistake add this to
188/// a real (positive) cost.
189constexpr double kUnknownCost = -1e12;
190
191/**
192 Access paths are a query planning structure that correspond 1:1 to iterators,
193 in that an access path contains pretty much exactly the information
194 needed to instantiate given iterator, plus some information that is only
195 needed during planning, such as costs. (The new join optimizer will extend
196 this somewhat in the future. Some iterators also need the query block,
197 ie., JOIN object, they are part of, but that is implicitly available when
198 constructing the tree.)
199
200 AccessPath objects build on a variant, ie., they can hold an access path of
201 any type (table scan, filter, hash join, sort, etc.), although only one at the
202 same time. Currently, they contain 32 bytes of base information that is common
203 to any access path (type identifier, costs, etc.), and then up to 40 bytes
204 that is type-specific (e.g. for a table scan, the TABLE object). It would be
205 nice if we could squeeze it down to 64 and fit a cache line exactly, but it
206 does not seem to be easy without fairly large contortions.
207
208 We could have solved this by inheritance, but the fixed-size design makes it
209 possible to replace an access path when a better one is found, without
210 introducing a new allocation, which will be important when using them as a
211 planning structure.
212 */
214 // Most of the members are declared with initializers, but bit fields cannot
215 // be declared with initializers until C++20, so we need to define a
216 // constructor to initialize those fields.
217 // TODO(khatlen): When we move to C++20, add initializers to the bit fields
218 // too, and use the default constructor generated by the compiler.
220 : count_examined_rows(false),
222#ifndef NDEBUG
223 ,
224 forced_by_dbug(false)
225#endif
226 {
227 }
228
229 enum Type : uint8_t {
230 // Basic access paths (those with no children, at least nominally).
231 // NOTE: When adding more paths to this section, also update GetBasicTable()
232 // to handle them.
252
253 // Basic access paths that don't correspond to a specific table.
260
261 // Joins.
266
267 // Composite access paths.
283
284 // Access paths that modify tables.
288
289 /// A general enum to describe the safety of a given operation.
290 /// Currently we only use this to describe row IDs, but it can easily
291 /// be reused for safety of updating a table we're reading from
292 /// (the Halloween problem), or just generally unreproducible results
293 /// (e.g. a TABLESAMPLE changing due to external factors).
294 ///
295 /// Less safe values have higher numerical values.
296 enum Safety : uint8_t {
297 /// The given operation is always safe on this access path.
298 SAFE = 0,
299
300 /// The given operation is safe if this access path is scanned once,
301 /// but not if it's scanned multiple times (e.g. used on the inner side
302 /// of a nested-loop join). A typical example of this is a derived table
303 /// or CTE that is rematerialized on each scan, so that references to
304 /// the old values (such as row IDs) are no longer valid.
306
307 /// The given operation is unsafe on this access path, no matter how many
308 /// or few times it's scanned. Often, it may help to materialize it
309 /// (assuming the materialization itself doesn't use the operation
310 /// in question).
311 UNSAFE = 2
312 };
313
314 /// Whether it is safe to get row IDs (for sorting) from this access path.
316
317 /// Whether this access path counts as one that scans a base table,
318 /// and thus should be counted towards examined_rows. It can sometimes
319 /// seem a bit arbitrary which iterators count towards examined_rows
320 /// and which ones do not, so the only canonical reference is the tests.
322
323 /// Whether this access path contains a GROUP_INDEX_SKIP_SCAN
325
326#ifndef NDEBUG
327 /// Whether this access path is forced preferred over all others by means
328 /// of a SET DEBUG force_subplan_0x... statement.
330#endif
331
332 /// For UPDATE and DELETE statements: The node index of a table which can be
333 /// updated or deleted from immediately as the rows are read from the
334 /// iterator, if this path is only read from once. -1 if there is no such
335 /// table in this path.
336 ///
337 /// Note that this is an index into CostingReceiver's array of nodes, and is
338 /// not necessarily equal to the table number within the query block given by
339 /// Table_ref::tableno().
340 ///
341 /// The table, if any, is currently always the outermost table in the path.
342 ///
343 /// It is possible to have plans where it would be safe to operate
344 /// "immediately" on more than one table. For example, if we do a merge join,
345 /// it is safe to perform immediate deletes on tables on the inner side of the
346 /// join, since both sides are read only once. (However, we currently do not
347 /// support merge joins.)
348 ///
349 /// Another possibility is when the outer table of a nested loop join is
350 /// guaranteed to return at most one row (typically, a unique index lookup
351 /// aka. eq_ref). Then it's safe to delete immediately from both sides of the
352 /// nested loop join. But we don't to this yet.
353 ///
 354 /// Hash joins read both sides exactly once. However, with hash joins,
355 /// scans on the inner tables are not positioned on the correct row when the
356 /// result of the join is returned, so the immediate delete logic will need to
357 /// be changed to reposition the underlying scans before doing the immediate
358 /// deletes. While this can be done, it makes the benefit of immediate deletes
359 /// less obvious for these tables, and it can also be a loss in some cases,
360 /// because we lose the deduplication provided by the Unique object used for
361 /// buffered deletes (the immediate deletes could end up spending time
362 /// repositioning to already deleted rows). So we currently don't attempt to
363 /// do immediate deletes from inner tables of hash joins either.
364 ///
365 /// The outer table of a hash join can be deleted from immediately if the
366 /// inner table fits in memory. If the hash join spills to disk, though,
367 /// neither the rows of the outer table nor the rows of the inner table come
368 /// out in the order of the underlying scan, so it is not safe in general to
369 /// perform immediate deletes on the outer table of a hash join.
370 ///
371 /// If support for immediate operations on multiple tables is added,
372 /// this member could be changed from a node index to a NodeMap.
374
375 /// Which ordering the rows produced by this path follow, if any
376 /// (see interesting_orders.h). This is really a LogicalOrderings::StateIndex,
377 /// but we don't want to add a dependency on interesting_orders.h from
378 /// this file, so we use the base type instead of the typedef here.
380
381 /// If an iterator has been instantiated for this access path, points to the
382 /// iterator. Used for constructing iterators that need to talk to each other
383 /// (e.g. for recursive CTEs, or BKA join), and also for locating timing
384 /// information in EXPLAIN ANALYZE queries.
386
387 double cost() const { return m_cost; }
388
389 double init_cost() const { return m_init_cost; }
390
391 double init_once_cost() const { return m_init_once_cost; }
392
393 double cost_before_filter() const { return m_cost_before_filter; }
394
395 void set_cost(double val) {
396 assert(val >= 0.0 || val == kUnknownCost);
397 m_cost = val;
398 }
399
400 void set_init_cost(double val) {
401 assert(val >= 0.0 || val == kUnknownCost);
402 m_init_cost = val;
403 }
404
405 void set_init_once_cost(double val) {
406 assert(val >= 0.0);
407 m_init_once_cost = val;
408 }
409
410 void set_cost_before_filter(double val) {
411 assert(val >= 0.0 || val == kUnknownCost);
413 }
414
415 /// Return the cost of scanning the given path for the second time
416 /// (or later) in the given query block. This is really the interesting
417 /// metric, not init_once_cost in itself, but since nearly all paths
418 /// have zero init_once_cost, storing that instead allows us to skip
419 /// a lot of repeated path->init_once_cost = path->init_cost calls
420 /// in the code.
421 double rescan_cost() const { return cost() - init_once_cost(); }
422
423 /// If no filter, identical to num_output_rows.
425
426 /// Bitmap of WHERE predicates that we are including on this access path,
427 /// referring to the “predicates” array internal to the join optimizer.
428 /// Since bit masks are much cheaper to deal with than creating Item
429 /// objects, and we don't invent new conditions during join optimization
430 /// (all of them are known when we begin optimization), we stick to
431 /// manipulating bit masks during optimization, saying which filters will be
432 /// applied at this node (a 1-bit means the filter will be applied here; if
433 /// there are multiple ones, they are ANDed together).
434 ///
435 /// This is used during join optimization only; before iterators are
436 /// created, we will add FILTER access paths to represent these instead,
437 /// removing the dependency on the array. Said FILTER paths are by
438 /// convention created with materialize_subqueries = false, since the by far
439 /// most common case is that there are no subqueries in the predicate.
440 /// In other words, if you wish to represent a filter with
441 /// materialize_subqueries = true, you will need to make an explicit FILTER
442 /// node.
443 ///
444 /// See also nested_loop_join().equijoin_predicates, which is for filters
445 /// being applied _before_ nested-loop joins, but is otherwise the same idea.
447
448 /// Bitmap of sargable join predicates that have already been applied
449 /// in this access path by means of an index lookup (ref access),
450 /// again referring to “predicates”, and thus should not be counted again
451 /// for selectivity. Note that the filter may need to be applied
452 /// nevertheless (especially in case of type conversions); see
453 /// subsumed_sargable_join_predicates.
454 ///
455 /// Since these refer to the same array as filter_predicates, they will
456 /// never overlap with filter_predicates, and so we can reuse the same
457 /// memory using an alias (a union would not be allowed, since OverflowBitset
458 /// is a class with non-trivial default constructor), even though the meaning
459 /// is entirely separate. If N = num_where_predicates in the hypergraph, then
460 /// bits 0..(N-1) belong to filter_predicates, and the rest to
461 /// applied_sargable_join_predicates.
463 return filter_predicates;
464 }
466 return filter_predicates;
467 }
468
469 /// Bitmap of WHERE predicates that touch tables we have joined in,
470 /// but that we could not apply yet (for instance because they reference
 471 /// other tables, or because we could not push them down into
472 /// the nullable side of outer joins). Used during planning only
473 /// (see filter_predicates).
475
476 /// Similar to applied_sargable_join_predicates, bitmap of sargable
477 /// join predicates that have been applied and will subsume the join
478 /// predicate entirely, ie., not only should the selectivity not be
479 /// double-counted, but the predicate itself is redundant and need not
480 /// be applied as a filter. (It is an error to have a bit set here but not
481 /// in applied_sargable_join_predicates.)
483 return delayed_predicates;
484 }
486 return delayed_predicates;
487 }
488
489 /// If nonzero, a bitmap of other tables whose joined-in rows must already be
490 /// loaded when rows from this access path are evaluated; that is, this
491 /// access path must be put on the inner side of a nested-loop join (or
492 /// multiple such joins) where the outer side includes all of the given
493 /// tables.
494 ///
495 /// The most obvious case for this is dependent tables in LATERAL, but a more
496 /// common case is when we have pushed join conditions referring to those
497 /// tables; e.g., if this access path represents t1 and we have a condition
498 /// t1.x=t2.x that is pushed down into an index lookup (ref access), t2 will
499 /// be set in this bitmap. We can still join in other tables, deferring t2,
500 /// but the bit(s) will then propagate, and we cannot be on the right side of
501 /// a hash join until parameter_tables is zero again. (Also see
502 /// DisallowParameterizedJoinPath() for when we disallow such deferring,
503 /// as an optimization.)
504 ///
505 /// As a special case, we allow setting RAND_TABLE_BIT, even though it
506 /// is normally part of a table_map, not a NodeMap. In this case, it specifies
507 /// that the access path is entirely noncachable, because it depends on
508 /// something nondeterministic or an outer reference, and thus can never be on
509 /// the right side of a hash join, ever.
511
512 /// Auxiliary data used by a secondary storage engine while processing the
513 /// access path during optimization and execution. The secondary storage
514 /// engine is free to store any useful information in this member, for example
515 /// extra statistics or cost estimates. The data pointed to is fully owned by
516 /// the secondary storage engine, and it is the responsibility of the
517 /// secondary engine to manage the memory and make sure it is properly
518 /// destroyed.
519 void *secondary_engine_data{nullptr};
520
521 // Accessors for the union below.
522 auto &table_scan() {
523 assert(type == TABLE_SCAN);
524 return u.table_scan;
525 }
526 const auto &table_scan() const {
527 assert(type == TABLE_SCAN);
528 return u.table_scan;
529 }
530 auto &sample_scan() {
531 assert(type == SAMPLE_SCAN);
532 return u.sample_scan;
533 }
534 const auto &sample_scan() const {
535 assert(type == SAMPLE_SCAN);
536 return u.sample_scan;
537 }
538 auto &index_scan() {
539 assert(type == INDEX_SCAN);
540 return u.index_scan;
541 }
542 const auto &index_scan() const {
543 assert(type == INDEX_SCAN);
544 return u.index_scan;
545 }
547 assert(type == INDEX_DISTANCE_SCAN);
548 return u.index_distance_scan;
549 }
550 const auto &index_distance_scan() const {
551 assert(type == INDEX_DISTANCE_SCAN);
552 return u.index_distance_scan;
553 }
554 auto &ref() {
555 assert(type == REF);
556 return u.ref;
557 }
558 const auto &ref() const {
559 assert(type == REF);
560 return u.ref;
561 }
562 auto &ref_or_null() {
563 assert(type == REF_OR_NULL);
564 return u.ref_or_null;
565 }
566 const auto &ref_or_null() const {
567 assert(type == REF_OR_NULL);
568 return u.ref_or_null;
569 }
570 auto &eq_ref() {
571 assert(type == EQ_REF);
572 return u.eq_ref;
573 }
574 const auto &eq_ref() const {
575 assert(type == EQ_REF);
576 return u.eq_ref;
577 }
579 assert(type == PUSHED_JOIN_REF);
580 return u.pushed_join_ref;
581 }
582 const auto &pushed_join_ref() const {
583 assert(type == PUSHED_JOIN_REF);
584 return u.pushed_join_ref;
585 }
587 assert(type == FULL_TEXT_SEARCH);
588 return u.full_text_search;
589 }
590 const auto &full_text_search() const {
591 assert(type == FULL_TEXT_SEARCH);
592 return u.full_text_search;
593 }
594 auto &const_table() {
595 assert(type == CONST_TABLE);
596 return u.const_table;
597 }
598 const auto &const_table() const {
599 assert(type == CONST_TABLE);
600 return u.const_table;
601 }
602 auto &mrr() {
603 assert(type == MRR);
604 return u.mrr;
605 }
606 const auto &mrr() const {
607 assert(type == MRR);
608 return u.mrr;
609 }
610 auto &follow_tail() {
611 assert(type == FOLLOW_TAIL);
612 return u.follow_tail;
613 }
614 const auto &follow_tail() const {
615 assert(type == FOLLOW_TAIL);
616 return u.follow_tail;
617 }
619 assert(type == INDEX_RANGE_SCAN);
620 return u.index_range_scan;
621 }
622 const auto &index_range_scan() const {
623 assert(type == INDEX_RANGE_SCAN);
624 return u.index_range_scan;
625 }
626 auto &index_merge() {
627 assert(type == INDEX_MERGE);
628 return u.index_merge;
629 }
630 const auto &index_merge() const {
631 assert(type == INDEX_MERGE);
632 return u.index_merge;
633 }
635 assert(type == ROWID_INTERSECTION);
636 return u.rowid_intersection;
637 }
638 const auto &rowid_intersection() const {
639 assert(type == ROWID_INTERSECTION);
640 return u.rowid_intersection;
641 }
642 auto &rowid_union() {
643 assert(type == ROWID_UNION);
644 return u.rowid_union;
645 }
646 const auto &rowid_union() const {
647 assert(type == ROWID_UNION);
648 return u.rowid_union;
649 }
651 assert(type == INDEX_SKIP_SCAN);
652 return u.index_skip_scan;
653 }
654 const auto &index_skip_scan() const {
655 assert(type == INDEX_SKIP_SCAN);
656 return u.index_skip_scan;
657 }
659 assert(type == GROUP_INDEX_SKIP_SCAN);
660 return u.group_index_skip_scan;
661 }
662 const auto &group_index_skip_scan() const {
663 assert(type == GROUP_INDEX_SKIP_SCAN);
664 return u.group_index_skip_scan;
665 }
668 return u.dynamic_index_range_scan;
669 }
670 const auto &dynamic_index_range_scan() const {
672 return u.dynamic_index_range_scan;
673 }
676 return u.materialized_table_function;
677 }
678 const auto &materialized_table_function() const {
680 return u.materialized_table_function;
681 }
683 assert(type == UNQUALIFIED_COUNT);
684 return u.unqualified_count;
685 }
686 const auto &unqualified_count() const {
687 assert(type == UNQUALIFIED_COUNT);
688 return u.unqualified_count;
689 }
691 assert(type == TABLE_VALUE_CONSTRUCTOR);
692 return u.table_value_constructor;
693 }
694 const auto &table_value_constructor() const {
695 assert(type == TABLE_VALUE_CONSTRUCTOR);
696 return u.table_value_constructor;
697 }
699 assert(type == FAKE_SINGLE_ROW);
700 return u.fake_single_row;
701 }
702 const auto &fake_single_row() const {
703 assert(type == FAKE_SINGLE_ROW);
704 return u.fake_single_row;
705 }
706 auto &zero_rows() {
707 assert(type == ZERO_ROWS);
708 return u.zero_rows;
709 }
710 const auto &zero_rows() const {
711 assert(type == ZERO_ROWS);
712 return u.zero_rows;
713 }
715 assert(type == ZERO_ROWS_AGGREGATED);
716 return u.zero_rows_aggregated;
717 }
718 const auto &zero_rows_aggregated() const {
719 assert(type == ZERO_ROWS_AGGREGATED);
720 return u.zero_rows_aggregated;
721 }
722 auto &hash_join() {
723 assert(type == HASH_JOIN);
724 return u.hash_join;
725 }
726 const auto &hash_join() const {
727 assert(type == HASH_JOIN);
728 return u.hash_join;
729 }
730 auto &bka_join() {
731 assert(type == BKA_JOIN);
732 return u.bka_join;
733 }
734 const auto &bka_join() const {
735 assert(type == BKA_JOIN);
736 return u.bka_join;
737 }
739 assert(type == NESTED_LOOP_JOIN);
740 return u.nested_loop_join;
741 }
742 const auto &nested_loop_join() const {
743 assert(type == NESTED_LOOP_JOIN);
744 return u.nested_loop_join;
745 }
748 return u.nested_loop_semijoin_with_duplicate_removal;
749 }
752 return u.nested_loop_semijoin_with_duplicate_removal;
753 }
754 auto &filter() {
755 assert(type == FILTER);
756 return u.filter;
757 }
758 const auto &filter() const {
759 assert(type == FILTER);
760 return u.filter;
761 }
762 auto &sort() {
763 assert(type == SORT);
764 return u.sort;
765 }
766 const auto &sort() const {
767 assert(type == SORT);
768 return u.sort;
769 }
770 auto &aggregate() {
771 assert(type == AGGREGATE);
772 return u.aggregate;
773 }
774 const auto &aggregate() const {
775 assert(type == AGGREGATE);
776 return u.aggregate;
777 }
779 assert(type == TEMPTABLE_AGGREGATE);
780 return u.temptable_aggregate;
781 }
782 const auto &temptable_aggregate() const {
783 assert(type == TEMPTABLE_AGGREGATE);
784 return u.temptable_aggregate;
785 }
786 auto &limit_offset() {
787 assert(type == LIMIT_OFFSET);
788 return u.limit_offset;
789 }
790 const auto &limit_offset() const {
791 assert(type == LIMIT_OFFSET);
792 return u.limit_offset;
793 }
794 auto &stream() {
795 assert(type == STREAM);
796 return u.stream;
797 }
798 const auto &stream() const {
799 assert(type == STREAM);
800 return u.stream;
801 }
802 auto &materialize() {
803 assert(type == MATERIALIZE);
804 return u.materialize;
805 }
806 const auto &materialize() const {
807 assert(type == MATERIALIZE);
808 return u.materialize;
809 }
812 return u.materialize_information_schema_table;
813 }
816 return u.materialize_information_schema_table;
817 }
818 auto &append() {
819 assert(type == APPEND);
820 return u.append;
821 }
822 const auto &append() const {
823 assert(type == APPEND);
824 return u.append;
825 }
826 auto &window() {
827 assert(type == WINDOW);
828 return u.window;
829 }
830 const auto &window() const {
831 assert(type == WINDOW);
832 return u.window;
833 }
834 auto &weedout() {
835 assert(type == WEEDOUT);
836 return u.weedout;
837 }
838 const auto &weedout() const {
839 assert(type == WEEDOUT);
840 return u.weedout;
841 }
843 assert(type == REMOVE_DUPLICATES);
844 return u.remove_duplicates;
845 }
846 const auto &remove_duplicates() const {
847 assert(type == REMOVE_DUPLICATES);
848 return u.remove_duplicates;
849 }
852 return u.remove_duplicates_on_index;
853 }
854 const auto &remove_duplicates_on_index() const {
856 return u.remove_duplicates_on_index;
857 }
858 auto &alternative() {
859 assert(type == ALTERNATIVE);
860 return u.alternative;
861 }
862 const auto &alternative() const {
863 assert(type == ALTERNATIVE);
864 return u.alternative;
865 }
867 assert(type == CACHE_INVALIDATOR);
868 return u.cache_invalidator;
869 }
870 const auto &cache_invalidator() const {
871 assert(type == CACHE_INVALIDATOR);
872 return u.cache_invalidator;
873 }
874 auto &delete_rows() {
875 assert(type == DELETE_ROWS);
876 return u.delete_rows;
877 }
878 const auto &delete_rows() const {
879 assert(type == DELETE_ROWS);
880 return u.delete_rows;
881 }
882 auto &update_rows() {
883 assert(type == UPDATE_ROWS);
884 return u.update_rows;
885 }
886 const auto &update_rows() const {
887 assert(type == UPDATE_ROWS);
888 return u.update_rows;
889 }
890
891 double num_output_rows() const { return m_num_output_rows; }
892
893 void set_num_output_rows(double val) { m_num_output_rows = val; }
894
895 private:
896 /// Expected number of output rows.
898
899 /// Expected cost to read all of this access path once.
901
902 /// Expected cost to initialize this access path; ie., cost to read
903 /// k out of N rows would be init_cost + (k/N) * (cost - init_cost).
904 /// Note that EXPLAIN prints out cost of reading the _first_ row
905 /// because it is easier for the user and also easier to measure in
906 /// EXPLAIN ANALYZE, but it is easier to do calculations with a pure
907 /// initialization cost, so that is what we use in this member.
908 /// kUnknownCost for unknown.
910
911 /// Of init_cost, how much of the initialization needs only to be done
912 /// once per query block. (This is a cost, not a proportion.)
 913 /// Ie., if the access path can reuse some of its initialization work
914 /// if Init() is called multiple times, this member will be nonzero.
915 /// A typical example is a materialized table with rematerialize=false;
916 /// the second time Init() is called, it's a no-op. Most paths will have
917 /// init_once_cost = 0.0, ie., repeated scans will cost the same.
918 /// We do not intend to use this field to model cache effects.
919 ///
920 /// This is currently not printed in EXPLAIN, only optimizer trace.
921 double m_init_once_cost{0.0};
922
923 /// If no filter, identical to cost. init_cost is always the same
924 /// (filters have zero initialization cost).
926
927 // We'd prefer if this could be an std::variant, but we don't have C++17 yet.
928 // It is private to force all access to be through the type-checking
929 // accessors.
930 //
931 // For information about the meaning of each value, see the corresponding
932 // row iterator constructors.
933 union {
934 struct {
937 struct {
938 TABLE *table;
942 struct {
943 TABLE *table;
944 int idx;
948 struct {
949 TABLE *table;
950 int idx;
952 bool reverse;
954 struct {
955 TABLE *table;
957 bool use_order;
958 bool reverse;
960 struct {
961 TABLE *table;
963 bool use_order;
965 struct {
966 TABLE *table;
969 struct {
970 TABLE *table;
972 bool use_order;
975 struct {
976 TABLE *table;
978 bool use_order;
982 struct {
983 TABLE *table;
986 struct {
987 TABLE *table;
993 struct {
994 TABLE *table;
996 struct {
997 // The key part(s) we are scanning on. Note that this may be an array.
998 // You can get the table we are working on by looking into
999 // used_key_parts[0].field->table (it is not stored directly, to avoid
1000 // going over the AccessPath size limits).
1002
1003 // The actual ranges we are scanning over (originally derived from “key”).
1004 // Not a Bounds_checked_array, to save 4 bytes on the length.
1006 unsigned num_ranges;
1007
1008 unsigned mrr_flags;
1010
1011 // Which index (in the TABLE) we are scanning over, and how many of its
1012 // key parts we are using.
1013 unsigned index;
1015
1016 // If true, the scan can return rows in rowid order.
1018
1019 // If true, the scan _should_ return rows in rowid order.
1020 // Should only be set if can_be_used_for_ror == true.
1022
1023 // If true, this plan can be used for index merge scan.
1025
1026 // See row intersection for more details.
1028
1029 // Whether we are scanning over a geometry key part.
1030 bool geometry : 1;
1031
1032 // Whether we need a reverse scan. Only supported if geometry == false.
1033 bool reverse : 1;
1034
1035 // For a reverse scan, if we are using extended key parts. It is needed,
1036 // to set correct flags when retrieving records.
1039 struct {
1040 TABLE *table;
1045 struct {
1046 TABLE *table;
1048
1049 // Clustered primary key scan, if any.
1051
1052 bool forced_by_hint;
1055
1056 // If true, the first child scan should reuse table->file instead of
1057 // creating its own. This is true if the intersection is the topmost
1058 // range scan, but _not_ if it's below a union. (The reasons for this
1059 // are unknown.) It can also be negated by logic involving
1060 // retrieve_full_rows and is_covering, again for unknown reasons.
1061 //
1062 // This is not only for performance; multi-table delete has a hidden
1063 // dependency on this behavior when running against certain types of
1064 // tables (e.g. MyISAM), as it assumes table->file is correctly positioned
1065 // when deleting (and not all table types can transfer the position of one
1066 // handler to another by using position()).
1067 bool reuse_handler;
1068
1069 // true if no row retrieval phase is necessary.
1072 struct {
1073 TABLE *table;
1075 bool forced_by_hint;
1077 struct {
1078 TABLE *table;
1079 unsigned index;
1080 unsigned num_used_key_parts;
1081 bool forced_by_hint;
1082
1083 // Large, and has nontrivial destructors, so split out into
1084 // its own allocation.
1087 struct {
1088 TABLE *table;
1089 unsigned index;
1090 unsigned num_used_key_parts;
1091 bool forced_by_hint;
1092
1093 // Large, so split out into its own allocation.
1096 struct {
1097 TABLE *table;
1098 QEP_TAB *qep_tab; // Used only for buffering.
1100 struct {
1101 TABLE *table;
1105 struct {
1107
1108 struct {
1111 struct {
1112 // No members.
1114 struct {
1115 // The child is optional. It is only used for keeping track of which
1116 // tables are pruned away by this path, and it is only needed when this
1117 // path is on the inner side of an outer join. See ZeroRowsIterator for
1118 // details. The child of a ZERO_ROWS access path will not be visited by
1119 // WalkAccessPaths(). It will be visited by WalkTablesUnderAccessPath()
1120 // only if called with include_pruned_tables = true. No iterator is
1121 // created for the child, and the child is not shown by EXPLAIN.
1123 // Used for EXPLAIN only.
1124 // TODO(sgunders): make an enum.
1125 const char *cause;
1127 struct {
1128 // Used for EXPLAIN only.
1129 // TODO(sgunders): make an enum.
1130 const char *cause;
1132
1133 struct {
1137 bool store_rowids; // Whether we are below a weedout or not.
1141 struct {
1146 bool store_rowids; // Whether we are below a weedout or not.
1149 struct {
1151 JoinType join_type; // Somewhat redundant wrt. join_predicate.
1155
1156 // Equijoin filters to apply before the join, if any.
1157 // Indexes into join_predicate->expr->equijoin_conditions.
1158 // Non-equijoin conditions are always applied.
1159 // If already_expanded_predicates is true, do not re-expand.
1161
1162 // NOTE: Due to the nontrivial constructor on equijoin_predicates,
1163 // this struct needs an initializer, or the union would not be
1164 // default-constructible. If we need more than one union member
1165 // with such an initializer, we would probably need to change
1166 // equijoin_predicates into a uint64_t type-punned to an OverflowBitset.
1167 } nested_loop_join = {nullptr, nullptr, JoinType::INNER, false, false,
1168 nullptr, {}};
1169 struct {
1171 const TABLE *table;
1173 size_t key_len;
1175
1176 struct {
1179
1180 // This parameter, unlike nearly all others, is not passed to the
1181 // actual iterator. Instead, if true, it signifies that when creating
1182 // the iterator, all materializable subqueries in “condition” should be
1183 // materialized (with any in2exists condition removed first). In the
1184 // very rare case that there are two or more such subqueries, this is
1185 // an all-or-nothing decision, for simplicity.
1186 //
1187 // See FinalizeMaterializedSubqueries().
1190 struct {
1194
1195 // If filesort is nullptr: A new filesort will be created at the
1196 // end of optimization, using this order and flags. Otherwise: Only
1197 // used by EXPLAIN.
1204 struct {
1208 struct {
1211 TABLE *table;
1215 struct {
1217 ha_rows limit;
1221 // Only used when the LIMIT is on a UNION with SQL_CALC_FOUND_ROWS.
1222 // See Query_expression::send_records.
1225 struct {
1229 TABLE *table;
1231 int ref_slice;
1233 struct {
1234 // NOTE: The only legal access paths within table_path are
1235 // TABLE_SCAN, REF, REF_OR_NULL, EQ_REF, ALTERNATIVE,
1236 // CONST_TABLE (somewhat nonsensical), INDEX_SCAN and DYNAMIC_INDEX_SCAN
1238
1239 // Large, and has nontrivial destructors, so split out
1240 // into its own allocation.
1242 /** The total cost of executing the queries that we materialize.*/
1245 struct {
1248 Item *condition;
1250 struct {
1253 struct {
1258 int ref_slice;
1261 struct {
1266 struct {
1271 struct {
1273 TABLE *table;
1274 KEY *key;
1277 struct {
1279
1280 // For the ref.
1284 struct {
1286 const char *name;
1288 struct {
1293 struct {
1298 } u;
1299};
// AccessPath objects are placement-new'ed on a MEM_ROOT and never have
// their destructor run, so the type must be trivially destructible.
static_assert(std::is_trivially_destructible<AccessPath>::value,
              "AccessPath must be trivially destructible, as it is allocated "
              "on the MEM_ROOT and not wrapped in unique_ptr_destroy_only"
              "(because multiple candidates during planning could point to "
              "the same access paths, and refcounting would be expensive)");
// Keep the object small; the planner allocates very many of these.
static_assert(sizeof(AccessPath) <= 144,
              "We are creating a lot of access paths in the join "
              "optimizer, so be sure not to bloat it without noticing. "
              "(96 bytes for the base, 48 bytes for the variant.)");
1309
// Copy the basic estimated properties (costs, row-ID safety and ordering
// state) from one access path to another, e.g. when building a new path
// that should inherit the estimates of an existing one.
// NOTE(review): some property copies (e.g. output row count) may exist in
// the full definition but are not visible here — confirm against upstream.
inline void CopyBasicProperties(const AccessPath &from, AccessPath *to) {
  to->set_cost(from.cost());
  to->set_init_cost(from.init_cost());
  to->safe_for_rowid = from.safe_for_rowid;
  to->ordering_state = from.ordering_state;
}
1320
1321// Trivial factory functions for all of the types of access paths above.
1322
1324 bool count_examined_rows) {
1325 AccessPath *path = new (thd->mem_root) AccessPath;
1327 path->count_examined_rows = count_examined_rows;
1328 path->table_scan().table = table;
1329 return path;
1330}
1331
1333 double sampling_percentage,
1334 bool count_examined_rows) {
1335 AccessPath *path = new (thd->mem_root) AccessPath;
1337 path->count_examined_rows = count_examined_rows;
1338 path->sample_scan().table = table;
1339 path->sample_scan().sampling_percentage = sampling_percentage;
1340 return path;
1341}
1342
1344 bool use_order, bool reverse,
1345 bool count_examined_rows) {
1346 AccessPath *path = new (thd->mem_root) AccessPath;
1348 path->count_examined_rows = count_examined_rows;
1349 path->index_scan().table = table;
1350 path->index_scan().idx = idx;
1351 path->index_scan().use_order = use_order;
1352 path->index_scan().reverse = reverse;
1353 return path;
1354}
1355
1357 bool use_order, bool reverse,
1358 bool count_examined_rows) {
1359 AccessPath *path = new (thd->mem_root) AccessPath;
1360 path->type = AccessPath::REF;
1361 path->count_examined_rows = count_examined_rows;
1362 path->ref().table = table;
1363 path->ref().ref = ref;
1364 path->ref().use_order = use_order;
1365 path->ref().reverse = reverse;
1366 return path;
1367}
1368
1370 Index_lookup *ref, bool use_order,
1371 bool count_examined_rows) {
1372 AccessPath *path = new (thd->mem_root) AccessPath;
1374 path->count_examined_rows = count_examined_rows;
1375 path->ref_or_null().table = table;
1376 path->ref_or_null().ref = ref;
1377 path->ref_or_null().use_order = use_order;
1378 return path;
1379}
1380
1382 bool count_examined_rows) {
1383 AccessPath *path = new (thd->mem_root) AccessPath;
1384 path->type = AccessPath::EQ_REF;
1385 path->count_examined_rows = count_examined_rows;
1386 path->eq_ref().table = table;
1387 path->eq_ref().ref = ref;
1388 return path;
1389}
1390
1392 Index_lookup *ref, bool use_order,
1393 bool is_unique,
1394 bool count_examined_rows) {
1395 AccessPath *path = new (thd->mem_root) AccessPath;
1397 path->count_examined_rows = count_examined_rows;
1398 path->pushed_join_ref().table = table;
1399 path->pushed_join_ref().ref = ref;
1400 path->pushed_join_ref().use_order = use_order;
1401 path->pushed_join_ref().is_unique = is_unique;
1402 return path;
1403}
1404
1407 Item_func_match *ft_func,
1408 bool use_order, bool use_limit,
1409 bool count_examined_rows) {
1410 AccessPath *path = new (thd->mem_root) AccessPath;
1412 path->count_examined_rows = count_examined_rows;
1413 path->full_text_search().table = table;
1414 path->full_text_search().ref = ref;
1415 path->full_text_search().use_order = use_order;
1416 path->full_text_search().use_limit = use_limit;
1417 path->full_text_search().ft_func = ft_func;
1418 return path;
1419}
1420
1423 bool count_examined_rows) {
1424 AccessPath *path = new (thd->mem_root) AccessPath;
1426 path->count_examined_rows = count_examined_rows;
1427 path->set_num_output_rows(1.0);
1428 path->set_cost(0.0);
1429 path->set_init_cost(0.0);
1430 path->set_init_once_cost(0.0);
1431 path->const_table().table = table;
1432 path->const_table().ref = ref;
1433 return path;
1434}
1435
1437 int mrr_flags) {
1438 AccessPath *path = new (thd->mem_root) AccessPath;
1439 path->type = AccessPath::MRR;
1440 path->mrr().table = table;
1441 path->mrr().ref = ref;
1442 path->mrr().mrr_flags = mrr_flags;
1443
1444 // This will be filled in when the BKA iterator is created.
1445 path->mrr().bka_path = nullptr;
1446
1447 return path;
1448}
1449
1451 bool count_examined_rows) {
1452 AccessPath *path = new (thd->mem_root) AccessPath;
1454 path->count_examined_rows = count_examined_rows;
1455 path->follow_tail().table = table;
1456 return path;
1457}
1458
1460 THD *thd, TABLE *table, QEP_TAB *qep_tab, bool count_examined_rows) {
1461 AccessPath *path = new (thd->mem_root) AccessPath;
1463 path->count_examined_rows = count_examined_rows;
1464 path->dynamic_index_range_scan().table = table;
1465 path->dynamic_index_range_scan().qep_tab = qep_tab;
1466 return path;
1467}
1468
1470 THD *thd, TABLE *table, Table_function *table_function,
1471 AccessPath *table_path) {
1472 AccessPath *path = new (thd->mem_root) AccessPath;
1474 path->materialized_table_function().table = table;
1475 path->materialized_table_function().table_function = table_function;
1476 path->materialized_table_function().table_path = table_path;
1477 return path;
1478}
1479
1481 AccessPath *path = new (thd->mem_root) AccessPath;
1483 return path;
1484}
1485
1487 const JOIN *join);
1488
1490 THD *thd, AccessPath *outer, AccessPath *inner, const TABLE *table,
1491 KEY *key, size_t key_len) {
1492 AccessPath *path = new (thd->mem_root) AccessPath;
1494 path->nested_loop_semijoin_with_duplicate_removal().outer = outer;
1495 path->nested_loop_semijoin_with_duplicate_removal().inner = inner;
1496 path->nested_loop_semijoin_with_duplicate_removal().table = table;
1497 path->nested_loop_semijoin_with_duplicate_removal().key = key;
1498 path->nested_loop_semijoin_with_duplicate_removal().key_len = key_len;
1499 return path;
1500}
1501
1503 Item *condition) {
1504 AccessPath *path = new (thd->mem_root) AccessPath;
1505 path->type = AccessPath::FILTER;
1506 path->filter().child = child;
1507 path->filter().condition = condition;
1508 path->filter().materialize_subqueries = false;
1509 path->has_group_skip_scan = child->has_group_skip_scan;
1510 return path;
1511}
1512
1513// Not inline, because it needs access to filesort internals
1514// (which are forward-declared in this file).
1516 ORDER *order, bool count_examined_rows);
1517
1519 olap_type olap) {
1520 AccessPath *path = new (thd->mem_root) AccessPath;
1522 path->aggregate().child = child;
1523 path->aggregate().olap = olap;
1524 path->has_group_skip_scan = child->has_group_skip_scan;
1525 return path;
1526}
1527
1529 THD *thd, AccessPath *subquery_path, Temp_table_param *temp_table_param,
1530 TABLE *table, AccessPath *table_path, int ref_slice) {
1531 AccessPath *path = new (thd->mem_root) AccessPath;
1533 path->temptable_aggregate().subquery_path = subquery_path;
1534 path->temptable_aggregate().temp_table_param = temp_table_param;
1535 path->temptable_aggregate().table = table;
1536 path->temptable_aggregate().table_path = table_path;
1537 path->temptable_aggregate().ref_slice = ref_slice;
1538 return path;
1539}
1540
1542 ha_rows limit, ha_rows offset,
1543 bool count_all_rows,
1544 bool reject_multiple_rows,
1545 ha_rows *send_records_override) {
1547 AccessPath *path = new (thd->mem_root) AccessPath;
1549 path->immediate_update_delete_table = child->immediate_update_delete_table;
1550 path->limit_offset().child = child;
1551 path->limit_offset().limit = limit;
1552 path->limit_offset().offset = offset;
1553 path->limit_offset().count_all_rows = count_all_rows;
1554 path->limit_offset().reject_multiple_rows = reject_multiple_rows;
1555 path->limit_offset().send_records_override = send_records_override;
1556 path->ordering_state = child->ordering_state;
1557 path->has_group_skip_scan = child->has_group_skip_scan;
1559 return path;
1560}
1561
1563 bool count_examined_rows) {
1564 AccessPath *path = new (thd->mem_root) AccessPath;
1566 path->count_examined_rows = count_examined_rows;
1567 path->set_num_output_rows(1.0);
1568 path->set_cost(0.0);
1569 path->set_init_cost(0.0);
1570 path->set_init_once_cost(0.0);
1571 return path;
1572}
1573
1575 const char *cause) {
1576 AccessPath *path = new (thd->mem_root) AccessPath;
1578 path->zero_rows().child = child;
1579 path->zero_rows().cause = cause;
1580 path->set_num_output_rows(0.0);
1581 path->set_cost(0.0);
1582 path->set_init_cost(0.0);
1583 path->set_init_once_cost(0.0);
1584 path->num_output_rows_before_filter = 0.0;
1585 path->set_cost_before_filter(0.0);
1586 return path;
1587}
1588
1589inline AccessPath *NewZeroRowsAccessPath(THD *thd, const char *cause) {
1590 return NewZeroRowsAccessPath(thd, /*child=*/nullptr, cause);
1591}
1592
1594 const char *cause) {
1595 AccessPath *path = new (thd->mem_root) AccessPath;
1597 path->zero_rows_aggregated().cause = cause;
1598 path->set_num_output_rows(1.0);
1599 path->set_cost(0.0);
1600 path->set_init_cost(0.0);
1601 return path;
1602}
1603
1605 JOIN *join,
1606 Temp_table_param *temp_table_param,
1607 TABLE *table, int ref_slice) {
1608 AccessPath *path = new (thd->mem_root) AccessPath;
1609 path->type = AccessPath::STREAM;
1610 path->stream().child = child;
1611 path->stream().join = join;
1612 path->stream().temp_table_param = temp_table_param;
1613 path->stream().table = table;
1614 path->stream().ref_slice = ref_slice;
1615 // Will be set later if we get a weedout access path as parent.
1616 path->stream().provide_rowid = false;
1617 path->has_group_skip_scan = child->has_group_skip_scan;
1618 return path;
1619}
1620
1623 JOIN *join, bool copy_items,
1624 Temp_table_param *temp_table_param) {
1625 assert(path != nullptr);
1627 MaterializePathParameters::Operand &operand = array[0];
1628 operand.subquery_path = path;
1629 operand.select_number = select_number;
1630 operand.join = join;
1632 operand.copy_items = copy_items;
1633 operand.temp_table_param = temp_table_param;
1634 return array;
1635}
1636
1640 AccessPath *table_path, Common_table_expr *cte, Query_expression *unit,
1641 int ref_slice, bool rematerialize, ha_rows limit_rows,
1642 bool reject_multiple_rows) {
1645 param->m_operands = std::move(operands);
1646 if (rematerialize) {
1647 // There's no point in adding invalidators if we're rematerializing
1648 // every time anyway.
1649 param->invalidators = nullptr;
1650 } else {
1651 param->invalidators = invalidators;
1652 }
1653 param->table = table;
1654 param->cte = cte;
1655 param->unit = unit;
1656 param->ref_slice = ref_slice;
1657 param->rematerialize = rematerialize;
1658 param->limit_rows = (table == nullptr || table->is_union_or_table()
1659 ? limit_rows
1660 :
1661 // INTERSECT, EXCEPT: Enforced by TableScanIterator,
1662 // see its constructor
1663 HA_POS_ERROR);
1664 param->reject_multiple_rows = reject_multiple_rows;
1665
1666#ifndef NDEBUG
1667 for (MaterializePathParameters::Operand &operand : param->m_operands) {
1668 assert(operand.subquery_path != nullptr);
1669 }
1670#endif
1671
1672 AccessPath *path = new (thd->mem_root) AccessPath;
1674 path->materialize().table_path = table_path;
1675 path->materialize().param = param;
1676 path->materialize().subquery_cost = kUnknownCost;
1677 if (rematerialize) {
1678 path->safe_for_rowid = AccessPath::SAFE_IF_SCANNED_ONCE;
1679 } else {
1680 // The default; this is just to be explicit in the code.
1681 path->safe_for_rowid = AccessPath::SAFE;
1682 }
1683 return path;
1684}
1685
1687 THD *thd, AccessPath *table_path, Table_ref *table_list, Item *condition) {
1688 AccessPath *path = new (thd->mem_root) AccessPath;
1690 path->materialize_information_schema_table().table_path = table_path;
1691 path->materialize_information_schema_table().table_list = table_list;
1692 path->materialize_information_schema_table().condition = condition;
1693 return path;
1694}
1695
1696/// Add path costs c1 and c2, but handle kUnknownCost correctly.
1697inline double AddCost(double c1, double c2) {
1698 // If one is undefined, use the other, as we have nothing else.
1699 if (c1 == kUnknownCost) {
1700 return c2;
1701 } else if (c2 == kUnknownCost) {
1702 return c1;
1703 } else {
1704 return c1 + c2;
1705 }
1706}
1707
1708// The Mem_root_array must be allocated on a MEM_ROOT that lives at least for as
1709// long as the access path.
1711 THD *thd, Mem_root_array<AppendPathParameters> *children) {
1712 AccessPath *path = new (thd->mem_root) AccessPath;
1713 path->type = AccessPath::APPEND;
1714 path->append().children = children;
1715 double num_output_rows = 0.0;
1716 for (const AppendPathParameters &child : *children) {
1717 path->set_cost(AddCost(path->cost(), child.path->cost()));
1718 path->set_init_cost(AddCost(path->init_cost(), child.path->init_cost()));
1719 path->set_init_once_cost(path->init_once_cost() +
1720 child.path->init_once_cost());
1721 num_output_rows += child.path->num_output_rows();
1722 }
1723 path->set_num_output_rows(num_output_rows);
1724 return path;
1725}
1726
1728 Window *window,
1729 Temp_table_param *temp_table_param,
1730 int ref_slice, bool needs_buffering) {
1731 AccessPath *path = new (thd->mem_root) AccessPath;
1732 path->type = AccessPath::WINDOW;
1733 path->window().child = child;
1734 path->window().window = window;
1735 path->window().temp_table = nullptr;
1736 path->window().temp_table_param = temp_table_param;
1737 path->window().ref_slice = ref_slice;
1738 path->window().needs_buffering = needs_buffering;
1739 path->set_num_output_rows(child->num_output_rows());
1740 return path;
1741}
1742
1744 SJ_TMP_TABLE *weedout_table) {
1745 AccessPath *path = new (thd->mem_root) AccessPath;
1746 path->type = AccessPath::WEEDOUT;
1747 path->weedout().child = child;
1748 path->weedout().weedout_table = weedout_table;
1749 path->weedout().tables_to_get_rowid_for =
1750 0; // Must be handled by the caller.
1751 return path;
1752}
1753
1755 Item **group_items,
1756 int group_items_size) {
1757 AccessPath *path = new (thd->mem_root) AccessPath;
1759 path->remove_duplicates().child = child;
1760 path->remove_duplicates().group_items = group_items;
1761 path->remove_duplicates().group_items_size = group_items_size;
1762 path->has_group_skip_scan = child->has_group_skip_scan;
1763 return path;
1764}
1765
1767 THD *thd, AccessPath *child, TABLE *table, KEY *key,
1768 unsigned loosescan_key_len) {
1769 AccessPath *path = new (thd->mem_root) AccessPath;
1771 path->remove_duplicates_on_index().child = child;
1772 path->remove_duplicates_on_index().table = table;
1773 path->remove_duplicates_on_index().key = key;
1774 path->remove_duplicates_on_index().loosescan_key_len = loosescan_key_len;
1775 path->has_group_skip_scan = child->has_group_skip_scan;
1776 return path;
1777}
1778
1780 AccessPath *table_scan_path,
1781 Index_lookup *used_ref) {
1782 AccessPath *path = new (thd->mem_root) AccessPath;
1784 path->alternative().table_scan_path = table_scan_path;
1785 path->alternative().child = child;
1786 path->alternative().used_ref = used_ref;
1787 return path;
1788}
1789
1791 const char *name) {
1792 AccessPath *path = new (thd->mem_root) AccessPath;
1794 path->cache_invalidator().child = child;
1795 path->cache_invalidator().name = name;
1796 return path;
1797}
1798
1801 table_map immediate_tables);
1802
1805 table_map immediate_tables);
1806
1807/**
1808 Modifies "path" and the paths below it so that they provide row IDs for
1809 all tables.
1810 */
1812
1815 bool eligible_for_batch_mode);
1816
1817// A short form of CreateIteratorFromAccessPath() that implicitly uses the THD's
1818// MEM_ROOT for storage, which is nearly always what you want. (The only caller
1819// that does anything else is DynamicRangeIterator.)
1821 THD *thd, AccessPath *path, JOIN *join, bool eligible_for_batch_mode) {
1823 eligible_for_batch_mode);
1824}
1825
1826void SetCostOnTableAccessPath(const Cost_model_server &cost_model,
1827 const POSITION *pos, bool is_after_filter,
1828 AccessPath *path);
1829
1830/**
1831 Return the TABLE* referred from 'path' if it is a basic access path,
1832 else a nullptr is returned. Temporary tables, such as those used by
1833 sorting, aggregate and subquery materialization are not returned.
1834*/
1836
1837/**
1838 Returns a map of all tables read when `path` or any of its children are
1839 executed. Only iterators that are part of the same query block as `path`
1840 are considered.
1841
1842 If a table is read that doesn't have a map, specifically the temporary
1843 tables made as part of materialization within the same query block,
1844 RAND_TABLE_BIT will be set as a convention and none of that access path's
1845 children will be included in the map. In this case, the caller will need to
1846 manually go in and find said access path, to ask it for its TABLE object.
1847
1848 If include_pruned_tables = true, tables that are hidden under a ZERO_ROWS
1849 access path (ie., pruned away due to impossible join conditions) will be
1850 included in the map. This is normally what you want, as those tables need to
1851 be included whenever you store NULL flags and the likes, but if you don't
1852 want them (perhaps to specifically check for conditions referring to pruned
1853 tables), you can set it to false.
1854 */
1855table_map GetUsedTableMap(const AccessPath *path, bool include_pruned_tables);
1856
1857/**
1858 Find the list of all tables used by this root, stopping at materializations.
1859 Used for knowing which tables to sort.
1860 */
1862
1863/**
1864 For each access path in the (sub)tree rooted at “path”, expand any use of
1865 “filter_predicates” into newly-inserted FILTER access paths, using the given
1866 predicate list. This is used after finding an optimal set of access paths,
1867 to normalize the tree so that the remaining consumers do not need to worry
1868 about filter_predicates and cost_before_filter.
1869
1870 “join” is the join that “path” is part of.
1871 */
1873 const Mem_root_array<Predicate> &predicates,
1874 unsigned num_where_predicates);
1875
1876/**
1877 Extracts the Item expression from the given “filter_predicates” corresponding
1878 to the given “mask”.
1879 */
1882 int num_where_predicates);
1883
1884/// Like ExpandFilterAccessPaths(), but expands only the single access path
1885/// at “path”.
1887 const Mem_root_array<Predicate> &predicates,
1888 unsigned num_where_predicates);
1889
1890/// Returns the tables that are part of a hash join.
1892
1893/**
1894 Get the conditions to put into the extra conditions of the HashJoinIterator.
1895 This includes the non-equijoin conditions, as well as any equijoin conditions
1896 on columns that are too big to include in the hash table. (The old optimizer
1897 handles equijoin conditions on long columns elsewhere, so the last part only
1898 applies to the hypergraph optimizer.)
1899
1900 @param mem_root The root on which to allocate memory, if needed.
1901 @param using_hypergraph_optimizer True if using the hypergraph optimizer.
1902 @param equijoin_conditions All the equijoin conditions of the join.
1903 @param other_conditions All the non-equijoin conditions of the join.
1904
1905 @return All the conditions to evaluate as "extra conditions" in
1906 HashJoinIterator, or nullptr on OOM.
1907 */
1909 MEM_ROOT *mem_root, bool using_hypergraph_optimizer,
1910 const std::vector<HashJoinCondition> &equijoin_conditions,
1911 const Mem_root_array<Item *> &other_conditions);
1912
1913#endif // SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
constexpr double kUnknownCost
To indicate that a cost estimate is not yet made.
Definition: access_path.h:189
AccessPath * NewStreamingAccessPath(THD *thd, AccessPath *child, JOIN *join, Temp_table_param *temp_table_param, TABLE *table, int ref_slice)
Definition: access_path.h:1604
AccessPath * NewRemoveDuplicatesAccessPath(THD *thd, AccessPath *child, Item **group_items, int group_items_size)
Definition: access_path.h:1754
AccessPath * NewInvalidatorAccessPath(THD *thd, AccessPath *child, const char *name)
Definition: access_path.h:1790
AccessPath * NewRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool reverse, bool count_examined_rows)
Definition: access_path.h:1356
AccessPath * NewDeleteRowsAccessPath(THD *thd, AccessPath *child, table_map delete_tables, table_map immediate_tables)
Definition: access_path.cc:122
void CopyBasicProperties(const AccessPath &from, AccessPath *to)
Definition: access_path.h:1310
void ExpandSingleFilterAccessPath(THD *thd, AccessPath *path, const JOIN *join, const Mem_root_array< Predicate > &predicates, unsigned num_where_predicates)
Like ExpandFilterAccessPaths(), but expands only the single access path at “path”.
Definition: access_path.cc:1496
AccessPath * NewFullTextSearchAccessPath(THD *thd, TABLE *table, Index_lookup *ref, Item_func_match *ft_func, bool use_order, bool use_limit, bool count_examined_rows)
Definition: access_path.h:1405
AccessPath * NewTemptableAggregateAccessPath(THD *thd, AccessPath *subquery_path, Temp_table_param *temp_table_param, TABLE *table, AccessPath *table_path, int ref_slice)
Definition: access_path.h:1528
table_map GetHashJoinTables(AccessPath *path)
Returns the tables that are part of a hash join.
Definition: access_path.cc:1621
AccessPath * NewDynamicIndexRangeScanAccessPath(THD *thd, TABLE *table, QEP_TAB *qep_tab, bool count_examined_rows)
Definition: access_path.h:1459
AccessPath * NewZeroRowsAggregatedAccessPath(THD *thd, const char *cause)
Definition: access_path.h:1593
AccessPath * NewAlternativeAccessPath(THD *thd, AccessPath *child, AccessPath *table_scan_path, Index_lookup *used_ref)
Definition: access_path.h:1779
AccessPath * NewUpdateRowsAccessPath(THD *thd, AccessPath *child, table_map delete_tables, table_map immediate_tables)
Definition: access_path.cc:134
AccessPath * NewMRRAccessPath(THD *thd, TABLE *table, Index_lookup *ref, int mrr_flags)
Definition: access_path.h:1436
table_map GetUsedTableMap(const AccessPath *path, bool include_pruned_tables)
Returns a map of all tables read when path or any of its children are executed.
Definition: access_path.cc:272
AccessPath * NewAppendAccessPath(THD *thd, Mem_root_array< AppendPathParameters > *children)
Definition: access_path.h:1710
AccessPath * NewNestedLoopSemiJoinWithDuplicateRemovalAccessPath(THD *thd, AccessPath *outer, AccessPath *inner, const TABLE *table, KEY *key, size_t key_len)
Definition: access_path.h:1489
AccessPath * NewRemoveDuplicatesOnIndexAccessPath(THD *thd, AccessPath *child, TABLE *table, KEY *key, unsigned loosescan_key_len)
Definition: access_path.h:1766
AccessPath * NewFakeSingleRowAccessPath(THD *thd, bool count_examined_rows)
Definition: access_path.h:1562
AccessPath * NewTableValueConstructorAccessPath(const THD *thd, const JOIN *join)
Definition: access_path.cc:166
Item * ConditionFromFilterPredicates(const Mem_root_array< Predicate > &predicates, OverflowBitset mask, int num_where_predicates)
Extracts the Item expression from the given “filter_predicates” corresponding to the given “mask”.
Definition: access_path.cc:1485
AccessPath * NewFilterAccessPath(THD *thd, AccessPath *child, Item *condition)
Definition: access_path.h:1502
constexpr double kUnknownRowCount
To indicate that a row estimate is not yet made.
Definition: access_path.h:184
AccessPath * NewSortAccessPath(THD *thd, AccessPath *child, Filesort *filesort, ORDER *order, bool count_examined_rows)
Definition: access_path.cc:87
AccessPath * NewMaterializedTableFunctionAccessPath(THD *thd, TABLE *table, Table_function *table_function, AccessPath *table_path)
Definition: access_path.h:1469
AccessPath * NewMaterializeAccessPath(THD *thd, Mem_root_array< MaterializePathParameters::Operand > operands, Mem_root_array< const AccessPath * > *invalidators, TABLE *table, AccessPath *table_path, Common_table_expr *cte, Query_expression *unit, int ref_slice, bool rematerialize, ha_rows limit_rows, bool reject_multiple_rows)
Definition: access_path.h:1637
const Mem_root_array< Item * > * GetExtraHashJoinConditions(MEM_ROOT *mem_root, bool using_hypergraph_optimizer, const std::vector< HashJoinCondition > &equijoin_conditions, const Mem_root_array< Item * > &other_conditions)
Get the conditions to put into the extra conditions of the HashJoinIterator.
Mem_root_array< TABLE * > CollectTables(THD *thd, AccessPath *root_path)
Find the list of all tables used by this root, stopping at materializations.
Definition: access_path.cc:303
AccessPath * NewTableScanAccessPath(THD *thd, TABLE *table, bool count_examined_rows)
Definition: access_path.h:1323
AccessPath * NewSampleScanAccessPath(THD *thd, TABLE *table, double sampling_percentage, bool count_examined_rows)
Definition: access_path.h:1332
AccessPath * NewAggregateAccessPath(THD *thd, AccessPath *child, olap_type olap)
Definition: access_path.h:1518
void ExpandFilterAccessPaths(THD *thd, AccessPath *path, const JOIN *join, const Mem_root_array< Predicate > &predicates, unsigned num_where_predicates)
For each access path in the (sub)tree rooted at “path”, expand any use of “filter_predicates” into ne...
Definition: access_path.cc:1609
double AddCost(double c1, double c2)
Add path costs c1 and c2, but handle kUnknownCost correctly.
Definition: access_path.h:1697
AccessPath * NewMaterializeInformationSchemaTableAccessPath(THD *thd, AccessPath *table_path, Table_ref *table_list, Item *condition)
Definition: access_path.h:1686
void FindTablesToGetRowidFor(AccessPath *path)
Modifies "path" and the paths below it so that they provide row IDs for all tables.
Definition: access_path.cc:1341
TABLE * GetBasicTable(const AccessPath *path)
Return the TABLE* referred from 'path' if it is a basic access path, else a nullptr is returned.
Definition: access_path.cc:215
AccessPath * NewRefOrNullAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool count_examined_rows)
Definition: access_path.h:1369
AccessPath * NewLimitOffsetAccessPath(THD *thd, AccessPath *child, ha_rows limit, ha_rows offset, bool count_all_rows, bool reject_multiple_rows, ha_rows *send_records_override)
Definition: access_path.h:1541
AccessPath * NewEQRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool count_examined_rows)
Definition: access_path.h:1381
AccessPath * NewWindowAccessPath(THD *thd, AccessPath *child, Window *window, Temp_table_param *temp_table_param, int ref_slice, bool needs_buffering)
Definition: access_path.h:1727
AccessPath * NewFollowTailAccessPath(THD *thd, TABLE *table, bool count_examined_rows)
Definition: access_path.h:1450
AccessPath * NewConstTableAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool count_examined_rows)
Definition: access_path.h:1421
unique_ptr_destroy_only< RowIterator > CreateIteratorFromAccessPath(THD *thd, MEM_ROOT *mem_root, AccessPath *path, JOIN *join, bool eligible_for_batch_mode)
Definition: access_path.cc:488
AccessPath * NewWeedoutAccessPath(THD *thd, AccessPath *child, SJ_TMP_TABLE *weedout_table)
Definition: access_path.h:1743
AccessPath * NewPushedJoinRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool is_unique, bool count_examined_rows)
Definition: access_path.h:1391
AccessPath * NewZeroRowsAccessPath(THD *thd, AccessPath *child, const char *cause)
Definition: access_path.h:1574
AccessPath * NewUnqualifiedCountAccessPath(THD *thd)
Definition: access_path.h:1480
AccessPath * NewIndexScanAccessPath(THD *thd, TABLE *table, int idx, bool use_order, bool reverse, bool count_examined_rows)
Definition: access_path.h:1343
Mem_root_array< MaterializePathParameters::Operand > SingleMaterializeQueryBlock(THD *thd, AccessPath *path, int select_number, JOIN *join, bool copy_items, Temp_table_param *temp_table_param)
Definition: access_path.h:1622
After parsing, a Common Table Expression is accessed through a Table_ref.
Definition: table.h:4428
API for getting cost estimates for server operations that are not directly related to a table object.
Definition: opt_costmodel.h:54
Sorting related info.
Definition: filesort.h:52
A class that represents a join condition in a hash join.
Definition: item_cmpfunc.h:87
Definition: item_func.h:3465
Base class that is used to represent any kind of expression in a relational query.
Definition: item.h:936
Definition: sql_optimizer.h:133
Definition: key.h:113
A typesafe replacement for DYNAMIC_ARRAY.
Definition: mem_root_array.h:426
Definition: overflow_bitset.h:78
Definition: sql_executor.h:254
Definition: range_optimizer.h:69
This class represents a query expression (one query block or several query blocks combined with UNION...
Definition: sql_lex.h:626
A context for reading through a single table using a chosen access method: index read,...
Definition: row_iterator.h:82
Definition: sql_executor.h:95
For each client connection we create a separate thread with THD serving as a thread/connection descri...
Definition: sql_lexer_thd.h:36
MEM_ROOT * mem_root
Definition: sql_lexer_thd.h:40
Class representing a table function.
Definition: table_function.h:53
Definition: table.h:2864
Object containing parameters used when creating and using temporary tables.
Definition: temp_table_param.h:95
Represents the (explicit) window of a SQL 2003 section 7.11 <window clause>, or the implicit (inlined...
Definition: window.h:110
static MEM_ROOT mem_root
Definition: client_plugin.cc:114
void EstimateLimitOffsetCost(AccessPath *path)
Estimate the costs and row count for a WINDOW AccessPath.
Definition: cost_model.cc:941
bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator, table_map tables_to_get_rowid_for, ha_rows num_rows_estimate, Filesort_info *fs_info, Sort_result *sort_result, ha_rows *found_rows)
Sort a table.
Definition: filesort.cc:367
void SetCostOnTableAccessPath(const Cost_model_server &cost_model, const POSITION *pos, bool is_after_filter, AccessPath *path)
Definition: sql_executor.cc:1872
std::bitset< kMaxSupportedFDs > FunctionalDependencySet
Definition: interesting_orders_defs.h:63
JoinType
Definition: join_type.h:28
static mi_bit_type mask[]
Definition: mi_packrec.cc:141
This file follows Google coding style, except for the name MEM_ROOT (which is kept for historical rea...
std::unique_ptr< T, Destroy_only< T > > unique_ptr_destroy_only
std::unique_ptr, but only destroying.
Definition: my_alloc.h:477
This file includes constants used by all storage engines.
my_off_t ha_rows
Definition: my_base.h:1141
#define HA_POS_ERROR
Definition: my_base.h:1143
uint64_t table_map
Definition: my_table_map.h:30
static char * path
Definition: mysqldump.cc:149
static PFS_engine_table_share_proxy table
Definition: pfs.cc:61
PT & ref(PT *tp)
Definition: tablespace_impl.cc:359
uint64_t NodeMap
Since our graphs can never have more than 61 tables, node sets and edge lists are implemented using 6...
Definition: node_map.h:40
RangeReverse< Range > reverse(Range &x)
Iterate over a range in reverse.
Definition: utilities.h:132
std::string join(Container cont, const std::string &delim)
join elements of an container into a string separated by a delimiter.
Definition: string.h:151
int delete_tables(PFS_engine_table_share_proxy **, unsigned int) noexcept
Definition: pfs_plugin_table_v1_all_empty.cc:39
olap_type
Definition: olap.h:31
OverflowBitset is a fixed-size (once allocated) bitmap that is optimized for the common case of few e...
required string key
Definition: replication_asynchronous_connection_failover.proto:60
case opt name
Definition: sslopt-case.h:29
Access paths are a query planning structure that correspond 1:1 to iterators, in that an access path ...
Definition: access_path.h:213
auto & weedout()
Definition: access_path.h:834
struct AccessPath::@62::@79 index_skip_scan
struct AccessPath::@62::@86 zero_rows
auto & filter()
Definition: access_path.h:754
bool count_all_rows
Definition: access_path.h:1219
AccessPath * bka_path
Definition: access_path.h:989
struct AccessPath::@62::@63 table_scan
auto & ref_or_null()
Definition: access_path.h:562
auto & materialized_table_function()
Definition: access_path.h:674
AccessPath * cpk_child
Definition: access_path.h:1050
auto & rowid_union()
Definition: access_path.h:642
hypergraph::NodeMap parameter_tables
If nonzero, a bitmap of other tables whose joined-in rows must already be loaded when rows from this ...
Definition: access_path.h:510
const auto & bka_join() const
Definition: access_path.h:734
TABLE * temp_table
Definition: access_path.h:1256
OverflowBitset equijoin_predicates
Definition: access_path.h:1160
Item ** group_items
Definition: access_path.h:1268
struct AccessPath::@62::@102 weedout
double rescan_cost() const
Return the cost of scanning the given path for the second time (or later) in the given query block.
Definition: access_path.h:421
auto & materialize()
Definition: access_path.h:802
const auto & temptable_aggregate() const
Definition: access_path.h:782
OverflowBitset & subsumed_sargable_join_predicates()
Similar to applied_sargable_join_predicates, bitmap of sargable join predicates that have been applie...
Definition: access_path.h:482
auto & index_scan()
Definition: access_path.h:538
const auto & delete_rows() const
Definition: access_path.h:878
const auto & filter() const
Definition: access_path.h:758
const auto & index_distance_scan() const
Definition: access_path.h:550
auto & delete_rows()
Definition: access_path.h:874
bool reuse_handler
Definition: access_path.h:1027
struct AccessPath::@62::@89 bka_join
KEY_PART * used_key_part
Definition: access_path.h:1001
double m_init_cost
Expected cost to initialize this access path; ie., cost to read k out of N rows would be init_cost + ...
Definition: access_path.h:909
olap_type olap
Definition: access_path.h:1206
struct AccessPath::@62::@71 full_text_search
OverflowBitset & applied_sargable_join_predicates()
Bitmap of sargable join predicates that have already been applied in this access path by means of an ...
Definition: access_path.h:462
Item * condition
Definition: access_path.h:1178
struct AccessPath::@62::@74 follow_tail
struct AccessPath::@62::@90 nested_loop_join
const auto & index_merge() const
Definition: access_path.h:630
const auto & update_rows() const
Definition: access_path.h:886
enum AccessPath::Type type
auto & alternative()
Definition: access_path.h:858
auto & pushed_join_ref()
Definition: access_path.h:578
void set_cost(double val)
Definition: access_path.h:395
AccessPath * outer
Definition: access_path.h:1134
auto & index_skip_scan()
Definition: access_path.h:650
struct AccessPath::@62::@69 eq_ref
bool rewrite_semi_to_inner
Definition: access_path.h:1138
const auto & zero_rows_aggregated() const
Definition: access_path.h:718
auto & group_index_skip_scan()
Definition: access_path.h:658
struct AccessPath::@62::@106 cache_invalidator
const auto & eq_ref() const
Definition: access_path.h:574
int group_items_size
Definition: access_path.h:1269
bool use_order
Definition: access_path.h:945
bool pfs_batch_mode
Definition: access_path.h:1152
auto & temptable_aggregate()
Definition: access_path.h:778
AccessPath()
Definition: access_path.h:219
GroupIndexSkipScanParameters * param
Definition: access_path.h:1094
auto & rowid_intersection()
Definition: access_path.h:634
struct AccessPath::@62::@80 group_index_skip_scan
double init_cost() const
Definition: access_path.h:389
struct AccessPath::@62::@105 alternative
bool materialize_subqueries
Definition: access_path.h:1188
AccessPath * child
Definition: access_path.h:1122
bool allow_spill_to_disk
Definition: access_path.h:1136
struct AccessPath::@62::@68 ref_or_null
Index_lookup * used_ref
Definition: access_path.h:1282
struct AccessPath::@62::@97 stream
auto & mrr()
Definition: access_path.h:602
auto & sample_scan()
Definition: access_path.h:530
const auto & index_scan() const
Definition: access_path.h:542
const auto & fake_single_row() const
Definition: access_path.h:702
struct AccessPath::@62::@96 limit_offset
bool reject_multiple_rows
Definition: access_path.h:1220
bool allow_clustered_primary_key_scan
Definition: access_path.h:1042
const char * name
Definition: access_path.h:1286
bool use_limit
Definition: access_path.h:979
Window * window
Definition: access_path.h:1255
table_map tables_to_get_rowid_for
Definition: access_path.h:1139
auto & materialize_information_schema_table()
Definition: access_path.h:810
struct AccessPath::@62::@100 append
auto & bka_join()
Definition: access_path.h:730
struct AccessPath::@62::@94 aggregate
const auto & follow_tail() const
Definition: access_path.h:614
struct AccessPath::@62::@99 materialize_information_schema_table
SJ_TMP_TABLE * weedout_table
Definition: access_path.h:1263
bool has_group_skip_scan
Whether this access path contains a GROUP_INDEX_SKIP_SCAN.
Definition: access_path.h:324
Index_lookup * ref
Definition: access_path.h:956
struct AccessPath::@62::@88 hash_join
const auto & window() const
Definition: access_path.h:830
void set_init_once_cost(double val)
Definition: access_path.h:405
const auto & materialized_table_function() const
Definition: access_path.h:678
struct AccessPath::@62::@107 delete_rows
auto & unqualified_count()
Definition: access_path.h:682
struct AccessPath::@62::@78 rowid_union
bool keep_current_rowid
Definition: access_path.h:991
bool using_extended_key_parts
Definition: access_path.h:1037
auto & nested_loop_join()
Definition: access_path.h:738
const auto & full_text_search() const
Definition: access_path.h:590
struct AccessPath::@62::@85 fake_single_row
struct AccessPath::@62::@84 table_value_constructor
bool can_be_used_for_ror
Definition: access_path.h:1017
float rec_per_key
Definition: access_path.h:1145
Safety safe_for_rowid
Whether it is safe to get row IDs (for sorting) from this access path.
Definition: access_path.h:315
JOIN * join
Definition: access_path.h:1227
struct AccessPath::@62::@73 mrr
unsigned index
Definition: access_path.h:1013
unsigned mrr_buf_size
Definition: access_path.h:1009
struct AccessPath::@62::@95 temptable_aggregate
auto & remove_duplicates()
Definition: access_path.h:842
RowIterator * iterator
If an iterator has been instantiated for this access path, points to the iterator.
Definition: access_path.h:385
struct AccessPath::@62::@64 sample_scan
const auto & stream() const
Definition: access_path.h:798
const auto & remove_duplicates() const
Definition: access_path.h:846
auto & window()
Definition: access_path.h:826
bool need_rows_in_rowid_order
Definition: access_path.h:1021
Table_ref * table_list
Definition: access_path.h:1247
const auto & unqualified_count() const
Definition: access_path.h:686
void * secondary_engine_data
Auxiliary data used by a secondary storage engine while processing the access path during optimizatio...
Definition: access_path.h:519
struct AccessPath::@62::@92 filter
void set_init_cost(double val)
Definition: access_path.h:400
void set_num_output_rows(double val)
Definition: access_path.h:893
bool is_covering
Definition: access_path.h:1070
const auto & mrr() const
Definition: access_path.h:606
void set_cost_before_filter(double val)
Definition: access_path.h:410
bool remove_duplicates
Definition: access_path.h:1200
table_map tables_to_update
Definition: access_path.h:1295
const auto & table_value_constructor() const
Definition: access_path.h:694
KEY * key
Definition: access_path.h:1172
auto & table_value_constructor()
Definition: access_path.h:690
auto & stream()
Definition: access_path.h:794
size_t key_len
Definition: access_path.h:1173
const OverflowBitset & subsumed_sargable_join_predicates() const
Definition: access_path.h:485
const auto & zero_rows() const
Definition: access_path.h:710
const auto & append() const
Definition: access_path.h:822
ha_rows offset
Definition: access_path.h:1218
int idx
Definition: access_path.h:944
auto & hash_join()
Definition: access_path.h:722
auto & cache_invalidator()
Definition: access_path.h:866
bool force_sort_rowids
Definition: access_path.h:1202
unsigned num_used_key_parts
Definition: access_path.h:1014
unsigned num_ranges
Definition: access_path.h:1006
ORDER * order
Definition: access_path.h:1198
auto & sort()
Definition: access_path.h:762
auto & fake_single_row()
Definition: access_path.h:698
struct AccessPath::@62::@77 rowid_intersection
struct AccessPath::@62::@75 index_range_scan
ha_rows limit
Definition: access_path.h:1199
auto & follow_tail()
Definition: access_path.h:610
double m_num_output_rows
Expected number of output rows.
Definition: access_path.h:897
const auto & sample_scan() const
Definition: access_path.h:534
const JoinPredicate * join_predicate
Definition: access_path.h:1135
const auto & const_table() const
Definition: access_path.h:598
const auto & dynamic_index_range_scan() const
Definition: access_path.h:670
const auto & table_scan() const
Definition: access_path.h:526
unsigned mrr_flags
Definition: access_path.h:1008
int ref_slice
Definition: access_path.h:1213
OverflowBitset filter_predicates
Bitmap of WHERE predicates that we are including on this access path, referring to the “predicates” a...
Definition: access_path.h:446
bool can_be_used_for_imerge
Definition: access_path.h:1024
bool forced_by_hint
Definition: access_path.h:1041
auto & limit_offset()
Definition: access_path.h:786
AccessPath * inner
Definition: access_path.h:1134
struct AccessPath::@62::@87 zero_rows_aggregated
QUICK_RANGE * range
Definition: access_path.h:951
bool provide_rowid
Definition: access_path.h:1230
const auto & index_range_scan() const
Definition: access_path.h:622
const auto & ref() const
Definition: access_path.h:558
bool forced_by_dbug
Whether this access path is forced preferred over all others by means of a SET DEBUG force_subplan_0x...
Definition: access_path.h:329
Item_func_match * ft_func
Definition: access_path.h:980
ha_rows * send_records_override
Definition: access_path.h:1223
struct AccessPath::@62::@81 dynamic_index_range_scan
auto & table_scan()
Definition: access_path.h:522
struct AccessPath::@62::@83 unqualified_count
const auto & hash_join() const
Definition: access_path.h:726
QEP_TAB * qep_tab
Definition: access_path.h:1098
Table_function * table_function
Definition: access_path.h:1102
auto & zero_rows_aggregated()
Definition: access_path.h:714
const auto & sort() const
Definition: access_path.h:766
bool unwrap_rollup
Definition: access_path.h:1201
Type
Definition: access_path.h:229
@ FOLLOW_TAIL
Definition: access_path.h:244
@ FILTER
Definition: access_path.h:268
@ PUSHED_JOIN_REF
Definition: access_path.h:240
@ ZERO_ROWS_AGGREGATED
Definition: access_path.h:257
@ UPDATE_ROWS
Definition: access_path.h:286
@ AGGREGATE
Definition: access_path.h:270
@ BKA_JOIN
Definition: access_path.h:264
@ ZERO_ROWS
Definition: access_path.h:256
@ CONST_TABLE
Definition: access_path.h:242
@ GROUP_INDEX_SKIP_SCAN
Definition: access_path.h:250
@ SAMPLE_SCAN
Definition: access_path.h:234
@ INDEX_RANGE_SCAN
Definition: access_path.h:245
@ UNQUALIFIED_COUNT
Definition: access_path.h:259
@ EQ_REF
Definition: access_path.h:239
@ FAKE_SINGLE_ROW
Definition: access_path.h:255
@ MATERIALIZE_INFORMATION_SCHEMA_TABLE
Definition: access_path.h:275
@ WINDOW
Definition: access_path.h:277
@ REF_OR_NULL
Definition: access_path.h:238
@ MATERIALIZE
Definition: access_path.h:274
@ NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL
Definition: access_path.h:263
@ ROWID_UNION
Definition: access_path.h:248
@ INDEX_SKIP_SCAN
Definition: access_path.h:249
@ MRR
Definition: access_path.h:243
@ CACHE_INVALIDATOR
Definition: access_path.h:282
@ INDEX_SCAN
Definition: access_path.h:235
@ TABLE_VALUE_CONSTRUCTOR
Definition: access_path.h:254
@ WEEDOUT
Definition: access_path.h:278
@ MATERIALIZED_TABLE_FUNCTION
Definition: access_path.h:258
@ REMOVE_DUPLICATES_ON_INDEX
Definition: access_path.h:280
@ TABLE_SCAN
Definition: access_path.h:233
@ REF
Definition: access_path.h:237
@ TEMPTABLE_AGGREGATE
Definition: access_path.h:271
@ LIMIT_OFFSET
Definition: access_path.h:272
@ APPEND
Definition: access_path.h:276
@ NESTED_LOOP_JOIN
Definition: access_path.h:262
@ INDEX_MERGE
Definition: access_path.h:246
@ FULL_TEXT_SEARCH
Definition: access_path.h:241
@ ALTERNATIVE
Definition: access_path.h:281
@ STREAM
Definition: access_path.h:273
@ REMOVE_DUPLICATES
Definition: access_path.h:279
@ ROWID_INTERSECTION
Definition: access_path.h:247
@ DYNAMIC_INDEX_RANGE_SCAN
Definition: access_path.h:251
@ DELETE_ROWS
Definition: access_path.h:285
@ SORT
Definition: access_path.h:269
@ INDEX_DISTANCE_SCAN
Definition: access_path.h:236
@ HASH_JOIN
Definition: access_path.h:265
bool retrieve_full_rows
Definition: access_path.h:1053
double m_init_once_cost
Of init_cost, how much of the initialization needs only to be done once per query block.
Definition: access_path.h:921
struct AccessPath::@62::@91 nested_loop_semijoin_with_duplicate_removal
const auto & rowid_union() const
Definition: access_path.h:646
const TABLE * table
Definition: access_path.h:1171
const auto & index_skip_scan() const
Definition: access_path.h:654
auto & dynamic_index_range_scan()
Definition: access_path.h:666
const auto & pushed_join_ref() const
Definition: access_path.h:582
auto & remove_duplicates_on_index()
Definition: access_path.h:850
table_map immediate_tables
Definition: access_path.h:1291
union AccessPath::@62 u
double m_cost_before_filter
If no filter, identical to cost.
Definition: access_path.h:925
double cost() const
Definition: access_path.h:387
int mrr_flags
Definition: access_path.h:990
auto & full_text_search()
Definition: access_path.h:586
const auto & materialize() const
Definition: access_path.h:806
int ordering_state
Which ordering the rows produced by this path follow, if any (see interesting_orders....
Definition: access_path.h:379
auto & eq_ref()
Definition: access_path.h:570
JoinType join_type
Definition: access_path.h:1143
struct AccessPath::@62::@98 materialize
const auto & limit_offset() const
Definition: access_path.h:790
MaterializePathParameters * param
Definition: access_path.h:1241
const char * cause
Definition: access_path.h:1125
auto & update_rows()
Definition: access_path.h:882
auto & index_merge()
Definition: access_path.h:626
auto & aggregate()
Definition: access_path.h:770
double m_cost
Expected cost to read all of this access path once.
Definition: access_path.h:900
const auto & group_index_skip_scan() const
Definition: access_path.h:662
auto & zero_rows()
Definition: access_path.h:706
AccessPath * subquery_path
Definition: access_path.h:1209
Mem_root_array< AppendPathParameters > * children
Definition: access_path.h:1251
struct AccessPath::@62::@76 index_merge
const auto & nested_loop_join() const
Definition: access_path.h:742
bool already_expanded_predicates
Definition: access_path.h:1153
const auto & remove_duplicates_on_index() const
Definition: access_path.h:854
Temp_table_param * temp_table_param
Definition: access_path.h:1210
auto & nested_loop_semijoin_with_duplicate_removal()
Definition: access_path.h:746
bool reverse
Definition: access_path.h:946
AccessPath * table_scan_path
Definition: access_path.h:1278
enum tablesample_type sampling_type
Definition: access_path.h:940
struct AccessPath::@62::@70 pushed_join_ref
const auto & materialize_information_schema_table() const
Definition: access_path.h:814
double subquery_cost
The total cost of executing the queries that we materialize.
Definition: access_path.h:1243
bool geometry
Definition: access_path.h:1030
bool count_examined_rows
Whether this access path counts as one that scans a base table, and thus should be counted towards ex...
Definition: access_path.h:321
unsigned loosescan_key_len
Definition: access_path.h:1275
auto & index_distance_scan()
Definition: access_path.h:546
auto & ref()
Definition: access_path.h:554
struct AccessPath::@62::@108 update_rows
struct AccessPath::@62::@104 remove_duplicates_on_index
table_map tables_to_delete_from
Definition: access_path.h:1290
double init_once_cost() const
Definition: access_path.h:391
struct AccessPath::@62::@93 sort
struct AccessPath::@62::@65 index_scan
IndexSkipScanParameters * param
Definition: access_path.h:1085
const OverflowBitset & applied_sargable_join_predicates() const
Definition: access_path.h:465
Mem_root_array< AccessPath * > * children
Definition: access_path.h:1043
struct AccessPath::@62::@66 index_distance_scan
OverflowBitset delayed_predicates
Bitmap of WHERE predicates that touch tables we have joined in, but that we could not apply yet (for ...
Definition: access_path.h:474
AccessPath * table_path
Definition: access_path.h:1103
const auto & aggregate() const
Definition: access_path.h:774
TABLE * table
Definition: access_path.h:935
auto & append()
Definition: access_path.h:818
double cost_before_filter() const
Definition: access_path.h:393
const auto & weedout() const
Definition: access_path.h:838
struct AccessPath::@62::@82 materialized_table_function
const auto & cache_invalidator() const
Definition: access_path.h:870
double sampling_percentage
Definition: access_path.h:939
bool needs_buffering
Definition: access_path.h:1259
int8_t immediate_update_delete_table
For UPDATE and DELETE statements: The node index of a table which can be updated or deleted from imme...
Definition: access_path.h:373
QUICK_RANGE ** ranges
Definition: access_path.h:1005
bool is_unique
Definition: access_path.h:973
const auto & alternative() const
Definition: access_path.h:862
Safety
A general enum to describe the safety of a given operation.
Definition: access_path.h:296
@ SAFE_IF_SCANNED_ONCE
The given operation is safe if this access path is scanned once, but not if it's scanned multiple tim...
Definition: access_path.h:305
@ UNSAFE
The given operation is unsafe on this access path, no matter how many or few times it's scanned.
Definition: access_path.h:311
@ SAFE
The given operation is always safe on this access path.
Definition: access_path.h:298
auto & const_table()
Definition: access_path.h:594
unsigned mrr_length_per_rec
Definition: access_path.h:1144
struct AccessPath::@62::@72 const_table
const auto & rowid_intersection() const
Definition: access_path.h:638
Mem_root_array< Item_values_column * > * output_refs
Definition: access_path.h:1109
auto & index_range_scan()
Definition: access_path.h:618
double num_output_rows() const
Definition: access_path.h:891
double num_output_rows_before_filter
If no filter, identical to num_output_rows.
Definition: access_path.h:424
const auto & ref_or_null() const
Definition: access_path.h:566
Filesort * filesort
Definition: access_path.h:1192
bool store_rowids
Definition: access_path.h:1137
const auto & nested_loop_semijoin_with_duplicate_removal() const
Definition: access_path.h:750
Definition: access_path.h:178
JOIN * join
Definition: access_path.h:180
AccessPath * path
Definition: access_path.h:179
Definition: group_index_skip_scan_plan.h:45
Logically a part of AccessPath::index_skip_scan(), but is too large, so split out into its own struct...
Definition: index_skip_scan_plan.h:73
Structure used for index-based lookups.
Definition: sql_opt_exec_shared.h:67
A specification that two specific relational expressions (e.g., two tables, or a table and a join bet...
Definition: access_path.h:78
int semijoin_group_size
Definition: access_path.h:124
FunctionalDependencySet functional_dependencies
Definition: access_path.h:95
Mem_root_array< int > functional_dependencies_idx
Definition: access_path.h:100
RelationalExpression * expr
Definition: access_path.h:79
double selectivity
Definition: access_path.h:80
int ordering_idx_needed_for_semijoin_rewrite
Definition: access_path.h:118
Item ** semijoin_group
Definition: access_path.h:123
size_t estimated_bytes_per_row
Definition: access_path.h:84
Definition: range_optimizer.h:55
The MEM_ROOT is a simple arena, where allocations are carved out of larger blocks.
Definition: my_alloc.h:83
Definition: materialize_path_parameters.h:42
AccessPath * subquery_path
Definition: materialize_path_parameters.h:43
int select_number
Definition: materialize_path_parameters.h:44
JOIN * join
Definition: materialize_path_parameters.h:45
bool copy_items
Definition: materialize_path_parameters.h:47
Temp_table_param * temp_table_param
Definition: materialize_path_parameters.h:48
bool disable_deduplication_by_hash_field
Definition: materialize_path_parameters.h:46
Definition: materialize_path_parameters.h:40
bool rematerialize
True if rematerializing on every Init() call (e.g., because we have a dependency on a value from outs...
Definition: materialize_path_parameters.h:80
Mem_root_array< Operand > m_operands
Definition: materialize_path_parameters.h:58
Common_table_expr * cte
If materializing a CTE, points to it (see m_cte), otherwise nullptr.
Definition: materialize_path_parameters.h:65
bool reject_multiple_rows
True if this is the top level iterator for a materialized derived table transformed from a scalar sub...
Definition: materialize_path_parameters.h:99
ha_rows limit_rows
Used for when pushing LIMIT down to MaterializeIterator; this is more efficient than having a LimitOf...
Definition: materialize_path_parameters.h:92
TABLE * table
Handle to table to materialize into.
Definition: materialize_path_parameters.h:62
int ref_slice
Definition: materialize_path_parameters.h:74
Query_expression * unit
The query expression we are materializing.
Definition: materialize_path_parameters.h:68
Mem_root_array< const AccessPath * > * invalidators
Definition: materialize_path_parameters.h:59
Definition: table.h:286
A position of table within a join order.
Definition: sql_select.h:355
A filter of some sort that is not a join condition (those are stored in JoinPredicate objects).
Definition: access_path.h:132
hypergraph::NodeMap total_eligibility_set
Definition: access_path.h:145
bool was_join_condition
Definition: access_path.h:158
Mem_root_array< int > functional_dependencies_idx
Definition: access_path.h:170
FunctionalDependencySet functional_dependencies
Definition: access_path.h:169
int source_multiple_equality_idx
Definition: access_path.h:166
hypergraph::NodeMap used_nodes
Definition: access_path.h:136
Item * condition
Definition: access_path.h:133
double selectivity
Definition: access_path.h:147
Mem_root_array< ContainedSubquery > contained_subqueries
Definition: access_path.h:175
Represents an expression tree in the relational algebra of joins.
Definition: relational_expression.h:145
Definition: table.h:1406
tablesample_type
Definition: tablesample.h:27