access_path.h
/* Copyright (c) 2020, 2024, Oracle and/or its affiliates.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License, version 2.0,
   as published by the Free Software Foundation.

   This program is designed to work with certain software (including
   but not limited to OpenSSL) that is licensed under separate terms,
   as designated in a particular file or component or in included license
   documentation. The authors of MySQL hereby grant you an additional
   permission to link the program and your derivative works with the
   separately licensed software that they have either included with
   the program or referenced in the documentation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License, version 2.0, for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */

#ifndef SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
#define SQL_JOIN_OPTIMIZER_ACCESS_PATH_H

#include <assert.h>
#include <stdint.h>
#include <cstddef>
#include <type_traits>
#include <utility>
#include <vector>

#include "my_alloc.h"
#include "my_base.h"
#include "my_table_map.h"
#include "sql/item.h"
// IWYU suggests removing row_iterator.h, but then the inlined short form of
// CreateIteratorFromAccessPath() fails to compile. So use a pragma to keep it.
#include "sql/iterators/row_iterator.h"  // IWYU pragma: keep
#include "sql/join_type.h"
#include "sql/mem_root_array.h"
#include "sql/olap.h"
#include "sql/sql_class.h"
#include "sql/table.h"

class Common_table_expr;
class Filesort;
class HashJoinCondition;
class Item_func_match;
class JOIN;
class KEY;
class QEP_TAB;
class QUICK_RANGE;
class SJ_TMP_TABLE;
class Table_function;
class Temp_table_param;
class Window;
struct AccessPath;
struct Index_lookup;
struct KEY_PART;
struct POSITION;
struct RelationalExpression;

/**
  A specification that two specific relational expressions
  (e.g., two tables, or a table and a join between two other tables)
  should be joined together. The actual join conditions, if any,
  live inside the “expr” object, as does the join type etc.
 */
struct JoinPredicate {
  RelationalExpression *expr = nullptr;
  double selectivity;

  // If this join is made using a hash join, estimates the width
  // of each row as stored in the hash table, in bytes.
  size_t estimated_bytes_per_row;

  // The set of (additional) functional dependencies that are active
  // after this join predicate has been applied. E.g. if we're joining
  // on t1.x = t2.x, there will be a bit for that functional dependency.
  // We don't currently support more complex join conditions, but there's
  // no conceptual reason why we couldn't, e.g. a join on a = b + c
  // could give rise to the FD {b, c} → a and possibly even {a, b} → c
  // or {a, c} → b.
  //
  // Used in the processing of interesting orders.
  FunctionalDependencySet functional_dependencies;

  // A less compact form of functional_dependencies, used during building
  // (FunctionalDependencySet bitmaps are only available after all functional
  // indexes have been collected and Build() has been called).
  Mem_root_array<int> functional_dependencies_idx;

  // A semijoin on the following format:
  //
  //   SELECT ... FROM t1 WHERE EXISTS
  //     (SELECT ... FROM t2 WHERE t1.f1=t2.f2 AND t1.f3=t2.f4 ... t1.fn=t2.fm)
  //
  // may be transformed into an equivalent inner join:
  //
  //   SELECT ... FROM (SELECT DISTINCT f2, f4...fm FROM t2) d JOIN t1
  //     ON t1.f1=d.f2 AND t1.f3=d.f4 ... t1.fn=d.fm
  //
  // If this is a suitable semijoin: This field will identify the
  // grouping given by (f2, f4..fm). (@see
  // LogicalOrderings::RemapOrderingIndex() for a description of how this
  // value can be mapped to an actual ordering). The join
  // optimizer will then consider deduplicating on it and applying the
  // above transform. If no such grouping was found, this field will be -1.
  int ordering_idx_needed_for_semijoin_rewrite = -1;

  // Same as ordering_idx_needed_for_semijoin_rewrite, but given to the
  // RemoveDuplicatesIterator for doing the actual grouping. Allocated
  // on the MEM_ROOT. Can be empty, in which case a LIMIT 1 would do.
  Item **semijoin_group = nullptr;
  int semijoin_group_size = 0;
};

/**
  A filter of some sort that is not a join condition (those are stored
  in JoinPredicate objects). AND conditions are typically split up into
  multiple Predicates.
 */
struct Predicate {
  Item *condition;

  // condition->used_tables(), converted to a NodeMap.
  hypergraph::NodeMap used_nodes;

  // tables referred to by the condition, plus any tables whose values
  // can null any of those tables. (Even when reordering outer joins,
  // at least one of those tables will still be present on the
  // left-hand side of the outer join, so this is sufficient.)
  //
  // As a special case, we allow setting RAND_TABLE_BIT, even though it
  // is normally part of a table_map, not a NodeMap.
  hypergraph::NodeMap total_eligibility_set;

  double selectivity;

  // Whether this predicate is a join condition after all; it was promoted
  // to a WHERE predicate since it was part of a cycle (see the comment in
  // AddCycleEdges()). If it is, it is usually ignored so that we don't
  // double-apply join conditions -- but if the join in question was not
  // applied (because the cycle was broken at this point), the predicate
  // would come into play. This is normally registered on the join itself
  // (see RelationalExpression::join_predicate_bitmap), but having the bit
  // on the predicate itself is used to avoid trying to push it down as a
  // sargable predicate.
  bool was_join_condition = false;

  // If this is a join condition that came from a multiple equality,
  // and we have decided to create a mesh from that multiple equality,
  // returns the index of it into the “multiple_equalities” array
  // in MakeJoinHypergraph(). (You don't actually need the array to
  // use this; it's just an opaque index to deduplicate between different
  // predicates.) Otherwise, -1.
  int source_multiple_equality_idx = -1;

  // See the equivalent fields in JoinPredicate.
  FunctionalDependencySet functional_dependencies;
  Mem_root_array<int> functional_dependencies_idx;

  // The list of all subqueries referred to in this predicate, if any.
  // The optimizer uses this to add their materialized/non-materialized
  // costs when evaluating filters.
  Mem_root_array<ContainedSubquery> contained_subqueries;
};

struct AppendPathParameters {
  AccessPath *path;
  JOIN *join;
};

/// To indicate that a row estimate is not yet made.
constexpr double kUnknownRowCount = -1.0;

/// To indicate that a cost estimate is not yet made. We use a large negative
/// value to avoid getting a positive result if we by mistake add this to
/// a real (positive) cost.
constexpr double kUnknownCost = -1e12;

/**
  Access paths are a query planning structure that correspond 1:1 to iterators,
  in that an access path contains pretty much exactly the information
  needed to instantiate a given iterator, plus some information that is only
  needed during planning, such as costs. (The new join optimizer will extend
  this somewhat in the future. Some iterators also need the query block,
  ie., JOIN object, they are part of, but that is implicitly available when
  constructing the tree.)

  AccessPath objects build on a variant, ie., they can hold an access path of
  any type (table scan, filter, hash join, sort, etc.), although only one at the
  same time. Currently, they contain 32 bytes of base information that is common
  to any access path (type identifier, costs, etc.), and then up to 40 bytes
  that is type-specific (e.g. for a table scan, the TABLE object). It would be
  nice if we could squeeze it down to 64 and fit a cache line exactly, but it
  does not seem to be easy without fairly large contortions.

  We could have solved this by inheritance, but the fixed-size design makes it
  possible to replace an access path when a better one is found, without
  introducing a new allocation, which will be important when using them as a
  planning structure.
 */
struct AccessPath {
  // Most of the members are declared with initializers, but bit fields cannot
  // be declared with initializers until C++20, so we need to define a
  // constructor to initialize those fields.
  // TODO(khatlen): When we move to C++20, add initializers to the bit fields
  // too, and use the default constructor generated by the compiler.
  AccessPath()
      : count_examined_rows(false),
        has_group_skip_scan(false)
#ifndef NDEBUG
        ,
        forced_by_dbug(false)
#endif
  {
  }

  enum Type : uint8_t {
    // Basic access paths (those with no children, at least nominally).
    // NOTE: When adding more paths to this section, also update GetBasicTable()
    // to handle them.
    TABLE_SCAN,
    SAMPLE_SCAN,
    INDEX_SCAN,
    INDEX_DISTANCE_SCAN,
    REF,
    REF_OR_NULL,
    EQ_REF,
    PUSHED_JOIN_REF,
    FULL_TEXT_SEARCH,
    CONST_TABLE,
    MRR,
    FOLLOW_TAIL,
    INDEX_RANGE_SCAN,
    INDEX_MERGE,
    ROWID_INTERSECTION,
    ROWID_UNION,
    INDEX_SKIP_SCAN,
    GROUP_INDEX_SKIP_SCAN,
    DYNAMIC_INDEX_RANGE_SCAN,

    // Basic access paths that don't correspond to a specific table.
    TABLE_VALUE_CONSTRUCTOR,
    FAKE_SINGLE_ROW,
    ZERO_ROWS,
    ZERO_ROWS_AGGREGATED,
    MATERIALIZED_TABLE_FUNCTION,
    UNQUALIFIED_COUNT,

    // Joins.
    HASH_JOIN,
    BKA_JOIN,
    NESTED_LOOP_JOIN,
    NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL,

    // Composite access paths.
    FILTER,
    SORT,
    AGGREGATE,
    TEMPTABLE_AGGREGATE,
    LIMIT_OFFSET,
    STREAM,
    MATERIALIZE,
    MATERIALIZE_INFORMATION_SCHEMA_TABLE,
    APPEND,
    WINDOW,
    WEEDOUT,
    REMOVE_DUPLICATES,
    REMOVE_DUPLICATES_ON_INDEX,
    ALTERNATIVE,
    CACHE_INVALIDATOR,

    // Access paths that modify tables.
    DELETE_ROWS,
    UPDATE_ROWS,
  } type;

  /// A general enum to describe the safety of a given operation.
  /// Currently we only use this to describe row IDs, but it can easily
  /// be reused for safety of updating a table we're reading from
  /// (the Halloween problem), or just generally unreproducible results
  /// (e.g. a TABLESAMPLE changing due to external factors).
  ///
  /// Less safe values have higher numerical values.
  enum Safety : uint8_t {
    /// The given operation is always safe on this access path.
    SAFE = 0,

    /// The given operation is safe if this access path is scanned once,
    /// but not if it's scanned multiple times (e.g. used on the inner side
    /// of a nested-loop join). A typical example of this is a derived table
    /// or CTE that is rematerialized on each scan, so that references to
    /// the old values (such as row IDs) are no longer valid.
    SAFE_IF_SCANNED_ONCE = 1,

    /// The given operation is unsafe on this access path, no matter how many
    /// or few times it's scanned. Often, it may help to materialize it
    /// (assuming the materialization itself doesn't use the operation
    /// in question).
    UNSAFE = 2
  };

  /// Whether it is safe to get row IDs (for sorting) from this access path.
  Safety safe_for_rowid = SAFE;

  /// Whether this access path counts as one that scans a base table,
  /// and thus should be counted towards examined_rows. It can sometimes
  /// seem a bit arbitrary which iterators count towards examined_rows
  /// and which ones do not, so the only canonical reference is the tests.
  bool count_examined_rows : 1;

  /// Whether this access path contains a GROUP_INDEX_SKIP_SCAN.
  bool has_group_skip_scan : 1;

#ifndef NDEBUG
  /// Whether this access path is forced preferred over all others by means
  /// of a SET DEBUG force_subplan_0x... statement.
  bool forced_by_dbug : 1;
#endif

  /// For UPDATE and DELETE statements: The node index of a table which can be
  /// updated or deleted from immediately as the rows are read from the
  /// iterator, if this path is only read from once. -1 if there is no such
  /// table in this path.
  ///
  /// Note that this is an index into CostingReceiver's array of nodes, and is
  /// not necessarily equal to the table number within the query block given by
  /// Table_ref::tableno().
  ///
  /// The table, if any, is currently always the outermost table in the path.
  ///
  /// It is possible to have plans where it would be safe to operate
  /// "immediately" on more than one table. For example, if we do a merge join,
  /// it is safe to perform immediate deletes on tables on the inner side of the
  /// join, since both sides are read only once. (However, we currently do not
  /// support merge joins.)
  ///
  /// Another possibility is when the outer table of a nested loop join is
  /// guaranteed to return at most one row (typically, a unique index lookup
  /// aka. eq_ref). Then it's safe to delete immediately from both sides of the
  /// nested loop join. But we don't do this yet.
  ///
  /// Hash joins read both sides exactly once. However, with hash joins, the
  /// scans on the inner tables are not positioned on the correct row when the
  /// result of the join is returned, so the immediate delete logic will need to
  /// be changed to reposition the underlying scans before doing the immediate
  /// deletes. While this can be done, it makes the benefit of immediate deletes
  /// less obvious for these tables, and it can also be a loss in some cases,
  /// because we lose the deduplication provided by the Unique object used for
  /// buffered deletes (the immediate deletes could end up spending time
  /// repositioning to already deleted rows). So we currently don't attempt to
  /// do immediate deletes from inner tables of hash joins either.
  ///
  /// The outer table of a hash join can be deleted from immediately if the
  /// inner table fits in memory. If the hash join spills to disk, though,
  /// neither the rows of the outer table nor the rows of the inner table come
  /// out in the order of the underlying scan, so it is not safe in general to
  /// perform immediate deletes on the outer table of a hash join.
  ///
  /// If support for immediate operations on multiple tables is added,
  /// this member could be changed from a node index to a NodeMap.
  int8_t immediate_update_delete_table{-1};

  /// Which ordering the rows produced by this path follow, if any
  /// (see interesting_orders.h). This is really a LogicalOrderings::StateIndex,
  /// but we don't want to add a dependency on interesting_orders.h from
  /// this file, so we use the base type instead of the typedef here.
  int ordering_state = 0;

  /// If an iterator has been instantiated for this access path, points to the
  /// iterator. Used for constructing iterators that need to talk to each other
  /// (e.g. for recursive CTEs, or BKA join), and also for locating timing
  /// information in EXPLAIN ANALYZE queries.
  RowIterator *iterator = nullptr;

  double cost() const { return m_cost; }

  double init_cost() const { return m_init_cost; }

  double init_once_cost() const { return m_init_once_cost; }

  double cost_before_filter() const { return m_cost_before_filter; }

  void set_cost(double val) {
    assert(val >= 0.0 || val == kUnknownCost);
    m_cost = val;
  }

  void set_init_cost(double val) {
    assert(val >= 0.0 || val == kUnknownCost);
    m_init_cost = val;
  }

  void set_init_once_cost(double val) {
    assert(val >= 0.0);
    m_init_once_cost = val;
  }

  void set_cost_before_filter(double val) {
    assert(val >= 0.0 || val == kUnknownCost);
    m_cost_before_filter = val;
  }

  /// Return the cost of scanning the given path for the second time
  /// (or later) in the given query block. This is really the interesting
  /// metric, not init_once_cost in itself, but since nearly all paths
  /// have zero init_once_cost, storing that instead allows us to skip
  /// a lot of repeated path->init_once_cost = path->init_cost calls
  /// in the code.
  double rescan_cost() const { return cost() - init_once_cost(); }
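
  /// (Worked example with illustrative numbers, not taken from the source:
  /// a materialized path with cost() = 12.0 where the one-time
  /// materialization accounts for init_once_cost() = 10.0 has
  /// rescan_cost() = 12.0 - 10.0 = 2.0; ie., later scans pay only for
  /// re-reading the already-materialized rows.)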

  /// If no filter, identical to num_output_rows.
  double num_output_rows_before_filter{kUnknownRowCount};

  /// Bitmap of WHERE predicates that we are including on this access path,
  /// referring to the “predicates” array internal to the join optimizer.
  /// Since bit masks are much cheaper to deal with than creating Item
  /// objects, and we don't invent new conditions during join optimization
  /// (all of them are known when we begin optimization), we stick to
  /// manipulating bit masks during optimization, saying which filters will be
  /// applied at this node (a 1-bit means the filter will be applied here; if
  /// there are multiple ones, they are ANDed together).
  ///
  /// This is used during join optimization only; before iterators are
  /// created, we will add FILTER access paths to represent these instead,
  /// removing the dependency on the array. Said FILTER paths are by
  /// convention created with materialize_subqueries = false, since the by far
  /// most common case is that there are no subqueries in the predicate.
  /// In other words, if you wish to represent a filter with
  /// materialize_subqueries = true, you will need to make an explicit FILTER
  /// node.
  ///
  /// See also nested_loop_join().equijoin_predicates, which is for filters
  /// being applied _before_ nested-loop joins, but is otherwise the same idea.
  OverflowBitset filter_predicates{0};

  /// Bitmap of sargable join predicates that have already been applied
  /// in this access path by means of an index lookup (ref access),
  /// again referring to “predicates”, and thus should not be counted again
  /// for selectivity. Note that the filter may need to be applied
  /// nevertheless (especially in case of type conversions); see
  /// subsumed_sargable_join_predicates.
  ///
  /// Since these refer to the same array as filter_predicates, they will
  /// never overlap with filter_predicates, and so we can reuse the same
  /// memory using an alias (a union would not be allowed, since OverflowBitset
  /// is a class with non-trivial default constructor), even though the meaning
  /// is entirely separate. If N = num_where_predicates in the hypergraph, then
  /// bits 0..(N-1) belong to filter_predicates, and the rest to
  /// applied_sargable_join_predicates.
  OverflowBitset &applied_sargable_join_predicates() {
    return filter_predicates;
  }
  const OverflowBitset &applied_sargable_join_predicates() const {
    return filter_predicates;
  }

  /// Bitmap of WHERE predicates that touch tables we have joined in,
  /// but that we could not apply yet (for instance because they reference
  /// other tables, or because we could not push them down into
  /// the nullable side of outer joins). Used during planning only
  /// (see filter_predicates).
  OverflowBitset delayed_predicates{0};

  /// Similar to applied_sargable_join_predicates, bitmap of sargable
  /// join predicates that have been applied and will subsume the join
  /// predicate entirely, ie., not only should the selectivity not be
  /// double-counted, but the predicate itself is redundant and need not
  /// be applied as a filter. (It is an error to have a bit set here but not
  /// in applied_sargable_join_predicates.)
  OverflowBitset &subsumed_sargable_join_predicates() {
    return delayed_predicates;
  }
  const OverflowBitset &subsumed_sargable_join_predicates() const {
    return delayed_predicates;
  }

  /// If nonzero, a bitmap of other tables whose joined-in rows must already be
  /// loaded when rows from this access path are evaluated; that is, this
  /// access path must be put on the inner side of a nested-loop join (or
  /// multiple such joins) where the outer side includes all of the given
  /// tables.
  ///
  /// The most obvious case for this is dependent tables in LATERAL, but a more
  /// common case is when we have pushed join conditions referring to those
  /// tables; e.g., if this access path represents t1 and we have a condition
  /// t1.x=t2.x that is pushed down into an index lookup (ref access), t2 will
  /// be set in this bitmap. We can still join in other tables, deferring t2,
  /// but the bit(s) will then propagate, and we cannot be on the right side of
  /// a hash join until parameter_tables is zero again. (Also see
  /// DisallowParameterizedJoinPath() for when we disallow such deferring,
  /// as an optimization.)
  ///
  /// As a special case, we allow setting RAND_TABLE_BIT, even though it
  /// is normally part of a table_map, not a NodeMap. In this case, it specifies
  /// that the access path is entirely noncachable, because it depends on
  /// something nondeterministic or an outer reference, and thus can never be on
  /// the right side of a hash join, ever.
  hypergraph::NodeMap parameter_tables{0};

  /// Auxiliary data used by a secondary storage engine while processing the
  /// access path during optimization and execution. The secondary storage
  /// engine is free to store any useful information in this member, for example
  /// extra statistics or cost estimates. The data pointed to is fully owned by
  /// the secondary storage engine, and it is the responsibility of the
  /// secondary engine to manage the memory and make sure it is properly
  /// destroyed.
  void *secondary_engine_data{nullptr};

  // Accessors for the union below.
  auto &table_scan() {
    assert(type == TABLE_SCAN);
    return u.table_scan;
  }
  const auto &table_scan() const {
    assert(type == TABLE_SCAN);
    return u.table_scan;
  }
  auto &sample_scan() {
    assert(type == SAMPLE_SCAN);
    return u.sample_scan;
  }
  const auto &sample_scan() const {
    assert(type == SAMPLE_SCAN);
    return u.sample_scan;
  }
  auto &index_scan() {
    assert(type == INDEX_SCAN);
    return u.index_scan;
  }
  const auto &index_scan() const {
    assert(type == INDEX_SCAN);
    return u.index_scan;
  }
  auto &index_distance_scan() {
    assert(type == INDEX_DISTANCE_SCAN);
    return u.index_distance_scan;
  }
  const auto &index_distance_scan() const {
    assert(type == INDEX_DISTANCE_SCAN);
    return u.index_distance_scan;
  }
  auto &ref() {
    assert(type == REF);
    return u.ref;
  }
  const auto &ref() const {
    assert(type == REF);
    return u.ref;
  }
  auto &ref_or_null() {
    assert(type == REF_OR_NULL);
    return u.ref_or_null;
  }
  const auto &ref_or_null() const {
    assert(type == REF_OR_NULL);
    return u.ref_or_null;
  }
  auto &eq_ref() {
    assert(type == EQ_REF);
    return u.eq_ref;
  }
  const auto &eq_ref() const {
    assert(type == EQ_REF);
    return u.eq_ref;
  }
  auto &pushed_join_ref() {
    assert(type == PUSHED_JOIN_REF);
    return u.pushed_join_ref;
  }
  const auto &pushed_join_ref() const {
    assert(type == PUSHED_JOIN_REF);
    return u.pushed_join_ref;
  }
  auto &full_text_search() {
    assert(type == FULL_TEXT_SEARCH);
    return u.full_text_search;
  }
  const auto &full_text_search() const {
    assert(type == FULL_TEXT_SEARCH);
    return u.full_text_search;
  }
  auto &const_table() {
    assert(type == CONST_TABLE);
    return u.const_table;
  }
  const auto &const_table() const {
    assert(type == CONST_TABLE);
    return u.const_table;
  }
  auto &mrr() {
    assert(type == MRR);
    return u.mrr;
  }
  const auto &mrr() const {
    assert(type == MRR);
    return u.mrr;
  }
  auto &follow_tail() {
    assert(type == FOLLOW_TAIL);
    return u.follow_tail;
  }
  const auto &follow_tail() const {
    assert(type == FOLLOW_TAIL);
    return u.follow_tail;
  }
  auto &index_range_scan() {
    assert(type == INDEX_RANGE_SCAN);
    return u.index_range_scan;
  }
  const auto &index_range_scan() const {
    assert(type == INDEX_RANGE_SCAN);
    return u.index_range_scan;
  }
  auto &index_merge() {
    assert(type == INDEX_MERGE);
    return u.index_merge;
  }
  const auto &index_merge() const {
    assert(type == INDEX_MERGE);
    return u.index_merge;
  }
  auto &rowid_intersection() {
    assert(type == ROWID_INTERSECTION);
    return u.rowid_intersection;
  }
  const auto &rowid_intersection() const {
    assert(type == ROWID_INTERSECTION);
    return u.rowid_intersection;
  }
  auto &rowid_union() {
    assert(type == ROWID_UNION);
    return u.rowid_union;
  }
  const auto &rowid_union() const {
    assert(type == ROWID_UNION);
    return u.rowid_union;
  }
  auto &index_skip_scan() {
    assert(type == INDEX_SKIP_SCAN);
    return u.index_skip_scan;
  }
  const auto &index_skip_scan() const {
    assert(type == INDEX_SKIP_SCAN);
    return u.index_skip_scan;
  }
  auto &group_index_skip_scan() {
    assert(type == GROUP_INDEX_SKIP_SCAN);
    return u.group_index_skip_scan;
  }
  const auto &group_index_skip_scan() const {
    assert(type == GROUP_INDEX_SKIP_SCAN);
    return u.group_index_skip_scan;
  }
  auto &dynamic_index_range_scan() {
    assert(type == DYNAMIC_INDEX_RANGE_SCAN);
    return u.dynamic_index_range_scan;
  }
  const auto &dynamic_index_range_scan() const {
    assert(type == DYNAMIC_INDEX_RANGE_SCAN);
    return u.dynamic_index_range_scan;
  }
  auto &materialized_table_function() {
    assert(type == MATERIALIZED_TABLE_FUNCTION);
    return u.materialized_table_function;
  }
  const auto &materialized_table_function() const {
    assert(type == MATERIALIZED_TABLE_FUNCTION);
    return u.materialized_table_function;
  }
  auto &unqualified_count() {
    assert(type == UNQUALIFIED_COUNT);
    return u.unqualified_count;
  }
  const auto &unqualified_count() const {
    assert(type == UNQUALIFIED_COUNT);
    return u.unqualified_count;
  }
  auto &table_value_constructor() {
    assert(type == TABLE_VALUE_CONSTRUCTOR);
    return u.table_value_constructor;
  }
  const auto &table_value_constructor() const {
    assert(type == TABLE_VALUE_CONSTRUCTOR);
    return u.table_value_constructor;
  }
  auto &fake_single_row() {
    assert(type == FAKE_SINGLE_ROW);
    return u.fake_single_row;
  }
  const auto &fake_single_row() const {
    assert(type == FAKE_SINGLE_ROW);
    return u.fake_single_row;
  }
  auto &zero_rows() {
    assert(type == ZERO_ROWS);
    return u.zero_rows;
  }
  const auto &zero_rows() const {
    assert(type == ZERO_ROWS);
    return u.zero_rows;
  }
  auto &zero_rows_aggregated() {
    assert(type == ZERO_ROWS_AGGREGATED);
    return u.zero_rows_aggregated;
  }
  const auto &zero_rows_aggregated() const {
    assert(type == ZERO_ROWS_AGGREGATED);
    return u.zero_rows_aggregated;
  }
  auto &hash_join() {
    assert(type == HASH_JOIN);
    return u.hash_join;
  }
  const auto &hash_join() const {
    assert(type == HASH_JOIN);
    return u.hash_join;
  }
  auto &bka_join() {
    assert(type == BKA_JOIN);
    return u.bka_join;
  }
  const auto &bka_join() const {
    assert(type == BKA_JOIN);
    return u.bka_join;
  }
  auto &nested_loop_join() {
    assert(type == NESTED_LOOP_JOIN);
    return u.nested_loop_join;
  }
  const auto &nested_loop_join() const {
    assert(type == NESTED_LOOP_JOIN);
    return u.nested_loop_join;
  }
  auto &nested_loop_semijoin_with_duplicate_removal() {
    assert(type == NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL);
    return u.nested_loop_semijoin_with_duplicate_removal;
  }
  const auto &nested_loop_semijoin_with_duplicate_removal() const {
    assert(type == NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL);
    return u.nested_loop_semijoin_with_duplicate_removal;
  }
  auto &filter() {
    assert(type == FILTER);
    return u.filter;
  }
  const auto &filter() const {
    assert(type == FILTER);
    return u.filter;
  }
  auto &sort() {
    assert(type == SORT);
    return u.sort;
  }
  const auto &sort() const {
    assert(type == SORT);
    return u.sort;
  }
  auto &aggregate() {
    assert(type == AGGREGATE);
    return u.aggregate;
  }
  const auto &aggregate() const {
    assert(type == AGGREGATE);
    return u.aggregate;
  }
  auto &temptable_aggregate() {
    assert(type == TEMPTABLE_AGGREGATE);
    return u.temptable_aggregate;
  }
  const auto &temptable_aggregate() const {
    assert(type == TEMPTABLE_AGGREGATE);
    return u.temptable_aggregate;
  }
  auto &limit_offset() {
    assert(type == LIMIT_OFFSET);
    return u.limit_offset;
  }
  const auto &limit_offset() const {
    assert(type == LIMIT_OFFSET);
    return u.limit_offset;
  }
  auto &stream() {
    assert(type == STREAM);
    return u.stream;
  }
  const auto &stream() const {
    assert(type == STREAM);
    return u.stream;
  }
  auto &materialize() {
    assert(type == MATERIALIZE);
    return u.materialize;
  }
  const auto &materialize() const {
    assert(type == MATERIALIZE);
    return u.materialize;
  }
  auto &materialize_information_schema_table() {
    assert(type == MATERIALIZE_INFORMATION_SCHEMA_TABLE);
    return u.materialize_information_schema_table;
  }
  const auto &materialize_information_schema_table() const {
    assert(type == MATERIALIZE_INFORMATION_SCHEMA_TABLE);
    return u.materialize_information_schema_table;
  }
  auto &append() {
    assert(type == APPEND);
    return u.append;
  }
  const auto &append() const {
    assert(type == APPEND);
    return u.append;
  }
  auto &window() {
    assert(type == WINDOW);
    return u.window;
  }
  const auto &window() const {
    assert(type == WINDOW);
    return u.window;
  }
  auto &weedout() {
    assert(type == WEEDOUT);
    return u.weedout;
  }
  const auto &weedout() const {
    assert(type == WEEDOUT);
    return u.weedout;
  }
  auto &remove_duplicates() {
    assert(type == REMOVE_DUPLICATES);
    return u.remove_duplicates;
  }
  const auto &remove_duplicates() const {
    assert(type == REMOVE_DUPLICATES);
    return u.remove_duplicates;
  }
  auto &remove_duplicates_on_index() {
    assert(type == REMOVE_DUPLICATES_ON_INDEX);
    return u.remove_duplicates_on_index;
  }
  const auto &remove_duplicates_on_index() const {
    assert(type == REMOVE_DUPLICATES_ON_INDEX);
    return u.remove_duplicates_on_index;
  }
  auto &alternative() {
    assert(type == ALTERNATIVE);
    return u.alternative;
  }
  const auto &alternative() const {
    assert(type == ALTERNATIVE);
    return u.alternative;
  }
  auto &cache_invalidator() {
    assert(type == CACHE_INVALIDATOR);
    return u.cache_invalidator;
  }
  const auto &cache_invalidator() const {
    assert(type == CACHE_INVALIDATOR);
    return u.cache_invalidator;
  }
  auto &delete_rows() {
    assert(type == DELETE_ROWS);
    return u.delete_rows;
  }
  const auto &delete_rows() const {
    assert(type == DELETE_ROWS);
    return u.delete_rows;
  }
  auto &update_rows() {
    assert(type == UPDATE_ROWS);
    return u.update_rows;
  }
  const auto &update_rows() const {
    assert(type == UPDATE_ROWS);
    return u.update_rows;
  }

  double num_output_rows() const { return m_num_output_rows; }

  void set_num_output_rows(double val) { m_num_output_rows = val; }

 private:
  /// Expected number of output rows.
  double m_num_output_rows{kUnknownRowCount};

  /// Expected cost to read all of this access path once.
  double m_cost{kUnknownCost};

  /// Expected cost to initialize this access path; ie., cost to read
  /// k out of N rows would be init_cost + (k/N) * (cost - init_cost).
  /// Note that EXPLAIN prints out cost of reading the _first_ row
  /// because it is easier for the user and also easier to measure in
  /// EXPLAIN ANALYZE, but it is easier to do calculations with a pure
  /// initialization cost, so that is what we use in this member.
  /// kUnknownCost for unknown.
  double m_init_cost{kUnknownCost};

  /// Of init_cost, how much of the initialization needs only to be done
  /// once per query block. (This is a cost, not a proportion.)
  /// Ie., if the access path can reuse some of its initialization work
  /// if Init() is called multiple times, this member will be nonzero.
  /// A typical example is a materialized table with rematerialize=false;
  /// the second time Init() is called, it's a no-op. Most paths will have
  /// init_once_cost = 0.0, ie., repeated scans will cost the same.
  /// We do not intend to use this field to model cache effects.
  ///
  /// This is currently not printed in EXPLAIN, only optimizer trace.
  double m_init_once_cost{0.0};

  /// If no filter, identical to cost. init_cost is always the same
  /// (filters have zero initialization cost).
  double m_cost_before_filter{kUnknownCost};

  // We'd prefer if this could be an std::variant, but we don't have C++17 yet.
  // It is private to force all access to be through the type-checking
  // accessors.
  //
  // For information about the meaning of each value, see the corresponding
  // row iterator constructors.
  union {
    struct {
      TABLE *table;
    } table_scan;
    struct {
      TABLE *table;
      double sampling_percentage;
    } sample_scan;
    struct {
      TABLE *table;
      int idx;
      bool use_order;
      bool reverse;
    } index_scan;
    struct {
      TABLE *table;
      int idx;
      QUICK_RANGE *range;
      bool reverse;
    } index_distance_scan;
    struct {
      TABLE *table;
      Index_lookup *ref;
      bool use_order;
      bool reverse;
    } ref;
    struct {
      TABLE *table;
      Index_lookup *ref;
      bool use_order;
    } ref_or_null;
    struct {
      TABLE *table;
      Index_lookup *ref;
    } eq_ref;
    struct {
      TABLE *table;
      Index_lookup *ref;
      bool use_order;
      bool is_unique;
    } pushed_join_ref;
    struct {
      TABLE *table;
      Index_lookup *ref;
      bool use_order;
      bool use_limit;
      Item_func_match *ft_func;
    } full_text_search;
    struct {
      TABLE *table;
      Index_lookup *ref;
    } const_table;
    struct {
      TABLE *table;
      Index_lookup *ref;
      AccessPath *bka_path;
      int mrr_flags;
    } mrr;
    struct {
      TABLE *table;
    } follow_tail;
    struct {
      // The key part(s) we are scanning on. Note that this may be an array.
      // You can get the table we are working on by looking into
      // used_key_parts[0].field->table (it is not stored directly, to avoid
      // going over the AccessPath size limits).
      KEY_PART *used_key_part;

      // The actual ranges we are scanning over (originally derived from “key”).
      // Not a Bounds_checked_array, to save 4 bytes on the length.
      QUICK_RANGE **ranges;
      unsigned num_ranges;

      unsigned mrr_flags;
      unsigned mrr_buf_size;

      // Which index (in the TABLE) we are scanning over, and how many of its
      // key parts we are using.
      unsigned index;
      unsigned num_used_key_parts;

      // If true, the scan can return rows in rowid order.
      bool can_be_used_for_ror : 1;

      // If true, the scan _should_ return rows in rowid order.
      // Should only be set if can_be_used_for_ror == true.
      bool need_rows_in_rowid_order : 1;

      // If true, this plan can be used for index merge scan.
      bool can_be_used_for_imerge : 1;

      // See row intersection for more details.
      bool reuse_handler : 1;

      // Whether we are scanning over a geometry key part.
      bool geometry : 1;

      // Whether we need a reverse scan. Only supported if geometry == false.
      bool reverse : 1;

      // For a reverse scan, if we are using extended key parts. It is needed,
      // to set correct flags when retrieving records.
      bool using_extended_key_parts : 1;
    } index_range_scan;
    struct {
      TABLE *table;
      bool forced_by_hint;
      bool allow_clustered_primary_key_scan;
      Mem_root_array<AccessPath *> *children;
    } index_merge;
    struct {
      TABLE *table;
      Mem_root_array<AccessPath *> *children;

      // Clustered primary key scan, if any.
      AccessPath *cpk_child;

      bool forced_by_hint;
      bool retrieve_full_rows;
      bool need_rows_in_rowid_order;

      // If true, the first child scan should reuse table->file instead of
      // creating its own. This is true if the intersection is the topmost
      // range scan, but _not_ if it's below a union. (The reasons for this
      // are unknown.) It can also be negated by logic involving
      // retrieve_full_rows and is_covering, again for unknown reasons.
      //
      // This is not only for performance; multi-table delete has a hidden
      // dependency on this behavior when running against certain types of
      // tables (e.g. MyISAM), as it assumes table->file is correctly positioned
      // when deleting (and not all table types can transfer the position of one
      // handler to another by using position()).
      bool reuse_handler;

      // true if no row retrieval phase is necessary.
      bool is_covering;
    } rowid_intersection;
    struct {
      TABLE *table;
      Mem_root_array<AccessPath *> *children;
      bool forced_by_hint;
    } rowid_union;
    struct {
      TABLE *table;
      unsigned index;
      unsigned num_used_key_parts;
      bool forced_by_hint;

      // Large, and has nontrivial destructors, so split out into
      // its own allocation.
      IndexSkipScanParameters *param;
    } index_skip_scan;
    struct {
      TABLE *table;
      unsigned index;
      unsigned num_used_key_parts;
      bool forced_by_hint;

      // Large, so split out into its own allocation.
      GroupIndexSkipScanParameters *param;
    } group_index_skip_scan;
    struct {
      TABLE *table;
      QEP_TAB *qep_tab;  // Used only for buffering.
    } dynamic_index_range_scan;
    struct {
      TABLE *table;
      Table_function *table_function;
      AccessPath *table_path;
    } materialized_table_function;
    struct {
    } unqualified_count;

    struct {
    } table_value_constructor;
    struct {
      // No members.
    } fake_single_row;
    struct {
      // The child is optional. It is only used for keeping track of which
      // tables are pruned away by this path, and it is only needed when this
      // path is on the inner side of an outer join. See ZeroRowsIterator for
      // details. The child of a ZERO_ROWS access path will not be visited by
      // WalkAccessPaths(). It will be visited by WalkTablesUnderAccessPath()
      // only if called with include_pruned_tables = true. No iterator is
      // created for the child, and the child is not shown by EXPLAIN.
      AccessPath *child;

      // Used for EXPLAIN only.
      // TODO(sgunders): make an enum.
      const char *cause;
    } zero_rows;
    struct {
      // Used for EXPLAIN only.
      // TODO(sgunders): make an enum.
      const char *cause;
    } zero_rows_aggregated;

    struct {
      AccessPath *outer, *inner;
      const JoinPredicate *join_predicate;
      bool allow_spill_to_disk;
      bool store_rowids;  // Whether we are below a weedout or not.
      bool rewrite_semi_to_inner;
      table_map tables_to_get_rowid_for;
    } hash_join;
    struct {
      AccessPath *outer, *inner;
      const JoinPredicate *join_predicate;
      unsigned mrr_length_per_rec;
      float rec_per_key;
      bool store_rowids;  // Whether we are below a weedout or not.
      table_map tables_to_get_rowid_for;
    } bka_join;
    struct {
      AccessPath *outer, *inner;
      JoinType join_type;  // Somewhat redundant wrt. join_predicate.
      bool pfs_batch_mode;
      bool already_expanded_predicates;
      const JoinPredicate *join_predicate;

      // Equijoin filters to apply before the join, if any.
      // Indexes into join_predicate->expr->equijoin_conditions.
      // Non-equijoin conditions are always applied.
      // If already_expanded_predicates is true, do not re-expand.
      OverflowBitset equijoin_predicates;

      // NOTE: Due to the nontrivial constructor on equijoin_predicates,
      // this struct needs an initializer, or the union would not be
      // default-constructible. If we need more than one union member
      // with such an initializer, we would probably need to change
      // equijoin_predicates into a uint64_t type-punned to an OverflowBitset.
    } nested_loop_join = {nullptr, nullptr, JoinType::INNER, false, false,
                          nullptr, {}};
    struct {
      AccessPath *outer, *inner;
      const TABLE *table;
      KEY *key;
      size_t key_len;
    } nested_loop_semijoin_with_duplicate_removal;

    struct {
      AccessPath *child;
      Item *condition;

      // This parameter, unlike nearly all others, is not passed to the
      // actual iterator. Instead, if true, it signifies that when creating
      // the iterator, all materializable subqueries in “condition” should be
      // materialized (with any in2exists condition removed first). In the
      // very rare case that there are two or more such subqueries, this is
      // an all-or-nothing decision, for simplicity.
      //
      // See FinalizeMaterializedSubqueries().
      bool materialize_subqueries;
    } filter;
    struct {
      AccessPath *child;
      Filesort *filesort;
      table_map tables_to_get_rowid_for;

      // If filesort is nullptr: A new filesort will be created at the
      // end of optimization, using this order and flags. Otherwise: Only
      // used by EXPLAIN.
      ORDER *order;
      ha_rows limit;
      bool remove_duplicates;
      bool unwrap_rollup;
      bool force_sort_rowids;
    } sort;
    struct {
      AccessPath *child;
      olap_type olap;
    } aggregate;
    struct {
      AccessPath *subquery_path;
      JOIN *join;
      Temp_table_param *temp_table_param;
      TABLE *table;
      AccessPath *table_path;
      int ref_slice;
    } temptable_aggregate;
    struct {
      AccessPath *child;
      ha_rows limit;
      ha_rows offset;
      bool count_all_rows;
      bool reject_multiple_rows;
      // Only used when the LIMIT is on a UNION with SQL_CALC_FOUND_ROWS.
      // See Query_expression::send_records.
      ha_rows *send_records_override;
    } limit_offset;
    struct {
      AccessPath *child;
      JOIN *join;
      Temp_table_param *temp_table_param;
      TABLE *table;
      bool provide_rowid;
      int ref_slice;
    } stream;
    struct {
      // NOTE: The only legal access paths within table_path are
      // TABLE_SCAN, REF, REF_OR_NULL, EQ_REF, ALTERNATIVE,
      // CONST_TABLE (somewhat nonsensical), INDEX_SCAN and DYNAMIC_INDEX_SCAN.
      AccessPath *table_path;

      // Large, and has nontrivial destructors, so split out
      // into its own allocation.
      MaterializePathParameters *param;
      /** The total cost of executing the queries that we materialize.*/
      double subquery_cost;
    } materialize;
    struct {
      AccessPath *table_path;
      Table_ref *table_list;
      Item *condition;
    } materialize_information_schema_table;
    struct {
      Mem_root_array<AppendPathParameters> *children;
    } append;
    struct {
      AccessPath *child;
      Window *window;
      TABLE *temp_table;
      Temp_table_param *temp_table_param;
      int ref_slice;
      bool needs_buffering;
    } window;
    struct {
      AccessPath *child;
      SJ_TMP_TABLE *weedout_table;
      table_map tables_to_get_rowid_for;
    } weedout;
    struct {
      AccessPath *child;
      Item **group_items;
      int group_items_size;
    } remove_duplicates;
    struct {
      AccessPath *child;
      TABLE *table;
      KEY *key;
      unsigned loosescan_key_len;
    } remove_duplicates_on_index;
    struct {
      AccessPath *table_scan_path;

      // For the ref.
      AccessPath *child;
      Index_lookup *used_ref;
    } alternative;
    struct {
      AccessPath *child;
      const char *name;
    } cache_invalidator;
    struct {
      AccessPath *child;
      table_map tables_to_delete_from;
      table_map immediate_tables;
    } delete_rows;
    struct {
      AccessPath *child;
      table_map tables_to_update;
      table_map immediate_tables;
    } update_rows;
  } u;
};
static_assert(std::is_trivially_destructible<AccessPath>::value,
              "AccessPath must be trivially destructible, as it is allocated "
              "on the MEM_ROOT and not wrapped in unique_ptr_destroy_only"
              "(because multiple candidates during planning could point to "
              "the same access paths, and refcounting would be expensive)");
static_assert(sizeof(AccessPath) <= 144,
              "We are creating a lot of access paths in the join "
              "optimizer, so be sure not to bloat it without noticing. "
              "(96 bytes for the base, 48 bytes for the variant.)");

inline void CopyBasicProperties(const AccessPath &from, AccessPath *to) {
  to->set_num_output_rows(from.num_output_rows());
  to->set_cost(from.cost());
  to->set_init_cost(from.init_cost());
  to->set_init_once_cost(from.init_once_cost());
  to->safe_for_rowid = from.safe_for_rowid;
  to->ordering_state = from.ordering_state;
  to->has_group_skip_scan = from.has_group_skip_scan;
}

// Trivial factory functions for all of the types of access paths above.

inline AccessPath *NewTableScanAccessPath(THD *thd, TABLE *table,
                                          bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::TABLE_SCAN;
  path->count_examined_rows = count_examined_rows;
  path->table_scan().table = table;
  return path;
}
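
// Usage sketch (hypothetical caller; 'thd' and 'table' come from surrounding
// optimizer code): building the path for a plain "SELECT * FROM t1" and
// letting the planner fill in the row estimate afterwards.
//
//   AccessPath *scan =
//       NewTableScanAccessPath(thd, table, /*count_examined_rows=*/true);
//   scan->set_num_output_rows(table->file->stats.records);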

inline AccessPath *NewSampleScanAccessPath(THD *thd, TABLE *table,
                                           double sampling_percentage,
                                           bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::SAMPLE_SCAN;
  path->count_examined_rows = count_examined_rows;
  path->sample_scan().table = table;
  path->sample_scan().sampling_percentage = sampling_percentage;
  return path;
}

inline AccessPath *NewIndexScanAccessPath(THD *thd, TABLE *table, int idx,
                                          bool use_order, bool reverse,
                                          bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::INDEX_SCAN;
  path->count_examined_rows = count_examined_rows;
  path->index_scan().table = table;
  path->index_scan().idx = idx;
  path->index_scan().use_order = use_order;
  path->index_scan().reverse = reverse;
  return path;
}

inline AccessPath *NewRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref,
                                    bool use_order, bool reverse,
                                    bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::REF;
  path->count_examined_rows = count_examined_rows;
  path->ref().table = table;
  path->ref().ref = ref;
  path->ref().use_order = use_order;
  path->ref().reverse = reverse;
  return path;
}

inline AccessPath *NewRefOrNullAccessPath(THD *thd, TABLE *table,
                                          Index_lookup *ref, bool use_order,
                                          bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::REF_OR_NULL;
  path->count_examined_rows = count_examined_rows;
  path->ref_or_null().table = table;
  path->ref_or_null().ref = ref;
  path->ref_or_null().use_order = use_order;
  return path;
}

inline AccessPath *NewEQRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref,
                                      bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::EQ_REF;
  path->count_examined_rows = count_examined_rows;
  path->eq_ref().table = table;
  path->eq_ref().ref = ref;
  return path;
}

inline AccessPath *NewPushedJoinRefAccessPath(THD *thd, TABLE *table,
                                              Index_lookup *ref, bool use_order,
                                              bool is_unique,
                                              bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::PUSHED_JOIN_REF;
  path->count_examined_rows = count_examined_rows;
  path->pushed_join_ref().table = table;
  path->pushed_join_ref().ref = ref;
  path->pushed_join_ref().use_order = use_order;
  path->pushed_join_ref().is_unique = is_unique;
  return path;
}

inline AccessPath *NewFullTextSearchAccessPath(THD *thd, TABLE *table,
                                               Index_lookup *ref,
                                               Item_func_match *ft_func,
                                               bool use_order, bool use_limit,
                                               bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::FULL_TEXT_SEARCH;
  path->count_examined_rows = count_examined_rows;
  path->full_text_search().table = table;
  path->full_text_search().ref = ref;
  path->full_text_search().use_order = use_order;
  path->full_text_search().use_limit = use_limit;
  path->full_text_search().ft_func = ft_func;
  return path;
}

inline AccessPath *NewConstTableAccessPath(THD *thd, TABLE *table,
                                           Index_lookup *ref,
                                           bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::CONST_TABLE;
  path->count_examined_rows = count_examined_rows;
  path->set_num_output_rows(1.0);
  path->set_cost(0.0);
  path->set_init_cost(0.0);
  path->set_init_once_cost(0.0);
  path->const_table().table = table;
  path->const_table().ref = ref;
  return path;
}

inline AccessPath *NewMRRAccessPath(THD *thd, TABLE *table, Index_lookup *ref,
                                    int mrr_flags) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::MRR;
  path->mrr().table = table;
  path->mrr().ref = ref;
  path->mrr().mrr_flags = mrr_flags;

  // This will be filled in when the BKA iterator is created.
  path->mrr().bka_path = nullptr;

  return path;
}

inline AccessPath *NewFollowTailAccessPath(THD *thd, TABLE *table,
                                           bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::FOLLOW_TAIL;
  path->count_examined_rows = count_examined_rows;
  path->follow_tail().table = table;
  return path;
}

inline AccessPath *NewDynamicIndexRangeScanAccessPath(
    THD *thd, TABLE *table, QEP_TAB *qep_tab, bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::DYNAMIC_INDEX_RANGE_SCAN;
  path->count_examined_rows = count_examined_rows;
  path->dynamic_index_range_scan().table = table;
  path->dynamic_index_range_scan().qep_tab = qep_tab;
  return path;
}

inline AccessPath *NewMaterializedTableFunctionAccessPath(
    THD *thd, TABLE *table, Table_function *table_function,
    AccessPath *table_path) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::MATERIALIZED_TABLE_FUNCTION;
  path->materialized_table_function().table = table;
  path->materialized_table_function().table_function = table_function;
  path->materialized_table_function().table_path = table_path;
  return path;
}

inline AccessPath *NewUnqualifiedCountAccessPath(THD *thd) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::UNQUALIFIED_COUNT;
  return path;
}

AccessPath *NewTableValueConstructorAccessPath(const THD *thd,
                                               const JOIN *join);

inline AccessPath *NewNestedLoopSemiJoinWithDuplicateRemovalAccessPath(
    THD *thd, AccessPath *outer, AccessPath *inner, const TABLE *table,
    KEY *key, size_t key_len) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL;
  path->nested_loop_semijoin_with_duplicate_removal().outer = outer;
  path->nested_loop_semijoin_with_duplicate_removal().inner = inner;
  path->nested_loop_semijoin_with_duplicate_removal().table = table;
  path->nested_loop_semijoin_with_duplicate_removal().key = key;
  path->nested_loop_semijoin_with_duplicate_removal().key_len = key_len;
  return path;
}

inline AccessPath *NewFilterAccessPath(THD *thd, AccessPath *child,
                                       Item *condition) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::FILTER;
  path->filter().child = child;
  path->filter().condition = condition;
  path->filter().materialize_subqueries = false;
  path->has_group_skip_scan = child->has_group_skip_scan;
  return path;
}

// Not inline, because it needs access to filesort internals
// (which are forward-declared in this file).
AccessPath *NewSortAccessPath(THD *thd, AccessPath *child, Filesort *filesort,
                              ORDER *order, bool count_examined_rows);

inline AccessPath *NewAggregateAccessPath(THD *thd, AccessPath *child,
                                          olap_type olap) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::AGGREGATE;
  path->aggregate().child = child;
  path->aggregate().olap = olap;
  path->has_group_skip_scan = child->has_group_skip_scan;
  return path;
}

inline AccessPath *NewTemptableAggregateAccessPath(
    THD *thd, AccessPath *subquery_path, JOIN *join,
    Temp_table_param *temp_table_param, TABLE *table, AccessPath *table_path,
    int ref_slice) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::TEMPTABLE_AGGREGATE;
  path->temptable_aggregate().subquery_path = subquery_path;
  path->temptable_aggregate().join = join;
  path->temptable_aggregate().temp_table_param = temp_table_param;
  path->temptable_aggregate().table = table;
  path->temptable_aggregate().table_path = table_path;
  path->temptable_aggregate().ref_slice = ref_slice;
  return path;
}

inline AccessPath *NewLimitOffsetAccessPath(THD *thd, AccessPath *child,
                                            ha_rows limit, ha_rows offset,
                                            bool count_all_rows,
                                            bool reject_multiple_rows,
                                            ha_rows *send_records_override) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::LIMIT_OFFSET;
  path->immediate_update_delete_table = child->immediate_update_delete_table;
  path->limit_offset().child = child;
  path->limit_offset().limit = limit;
  path->limit_offset().offset = offset;
  path->limit_offset().count_all_rows = count_all_rows;
  path->limit_offset().reject_multiple_rows = reject_multiple_rows;
  path->limit_offset().send_records_override = send_records_override;
  path->ordering_state = child->ordering_state;
  path->has_group_skip_scan = child->has_group_skip_scan;
  return path;
}

inline AccessPath *NewFakeSingleRowAccessPath(THD *thd,
                                              bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::FAKE_SINGLE_ROW;
  path->count_examined_rows = count_examined_rows;
  path->set_num_output_rows(1.0);
  path->set_cost(0.0);
  path->set_init_cost(0.0);
  path->set_init_once_cost(0.0);
  return path;
}

inline AccessPath *NewZeroRowsAccessPath(THD *thd, AccessPath *child,
                                         const char *cause) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::ZERO_ROWS;
  path->zero_rows().child = child;
  path->zero_rows().cause = cause;
  path->set_num_output_rows(0.0);
  path->set_cost(0.0);
  path->set_init_cost(0.0);
  path->set_init_once_cost(0.0);
  path->num_output_rows_before_filter = 0.0;
  path->set_cost_before_filter(0.0);
  return path;
}

inline AccessPath *NewZeroRowsAccessPath(THD *thd, const char *cause) {
  return NewZeroRowsAccessPath(thd, /*child=*/nullptr, cause);
}

inline AccessPath *NewZeroRowsAggregatedAccessPath(THD *thd,
                                                   const char *cause) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::ZERO_ROWS_AGGREGATED;
  path->zero_rows_aggregated().cause = cause;
  path->set_num_output_rows(1.0);
  path->set_cost(0.0);
  path->set_init_cost(0.0);
  return path;
}

inline AccessPath *NewStreamingAccessPath(THD *thd, AccessPath *child,
                                          JOIN *join,
                                          Temp_table_param *temp_table_param,
                                          TABLE *table, int ref_slice) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::STREAM;
  path->stream().child = child;
  path->stream().join = join;
  path->stream().temp_table_param = temp_table_param;
  path->stream().table = table;
  path->stream().ref_slice = ref_slice;
  // Will be set later if we get a weedout access path as parent.
  path->stream().provide_rowid = false;
  path->has_group_skip_scan = child->has_group_skip_scan;
  return path;
}

inline Mem_root_array<MaterializePathParameters::Operand>
SingleMaterializeQueryBlock(THD *thd, AccessPath *path, int select_number,
                            JOIN *join, bool copy_items,
                            Temp_table_param *temp_table_param) {
  assert(path != nullptr);
  Mem_root_array<MaterializePathParameters::Operand> array(thd->mem_root, 1);
  MaterializePathParameters::Operand &operand = array[0];
  operand.subquery_path = path;
  operand.select_number = select_number;
  operand.join = join;
  operand.disable_deduplication_by_hash_field = false;
  operand.copy_items = copy_items;
  operand.temp_table_param = temp_table_param;
  return array;
}

inline AccessPath *NewMaterializeAccessPath(
    THD *thd, Mem_root_array<MaterializePathParameters::Operand> operands,
    Mem_root_array<const AccessPath *> *invalidators, TABLE *table,
    AccessPath *table_path, Common_table_expr *cte, Query_expression *unit,
    int ref_slice, bool rematerialize, ha_rows limit_rows,
    bool reject_multiple_rows,
    MaterializePathParameters::DedupType dedup_reason =
        MaterializePathParameters::NO_DEDUP) {
  MaterializePathParameters *param =
      new (thd->mem_root) MaterializePathParameters;
  param->m_operands = std::move(operands);
  if (rematerialize) {
    // There's no point in adding invalidators if we're rematerializing
    // every time anyway.
    param->invalidators = nullptr;
  } else {
    param->invalidators = invalidators;
  }
  param->table = table;
  param->cte = cte;
  param->unit = unit;
  param->ref_slice = ref_slice;
  param->rematerialize = rematerialize;
  param->limit_rows = (table == nullptr || table->is_union_or_table()
                           ? limit_rows
                           :
                           // INTERSECT, EXCEPT: Enforced by TableScanIterator,
                           // see its constructor
                           HA_POS_ERROR);
  param->reject_multiple_rows = reject_multiple_rows;
  param->deduplication_reason = dedup_reason;

#ifndef NDEBUG
  for (MaterializePathParameters::Operand &operand : param->m_operands) {
    assert(operand.subquery_path != nullptr);
  }
#endif

  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::MATERIALIZE;
  path->materialize().table_path = table_path;
  path->materialize().param = param;
  path->materialize().subquery_cost = kUnknownCost;
  if (rematerialize) {
    path->safe_for_rowid = AccessPath::SAFE_IF_SCANNED_ONCE;
  } else {
    // The default; this is just to be explicit in the code.
    path->safe_for_rowid = AccessPath::SAFE;
  }
  return path;
}

inline AccessPath *NewMaterializeInformationSchemaTableAccessPath(
    THD *thd, AccessPath *table_path, Table_ref *table_list, Item *condition) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::MATERIALIZE_INFORMATION_SCHEMA_TABLE;
  path->materialize_information_schema_table().table_path = table_path;
  path->materialize_information_schema_table().table_list = table_list;
  path->materialize_information_schema_table().condition = condition;
  return path;
}

/// Add path costs c1 and c2, but handle kUnknownCost correctly.
inline double AddCost(double c1, double c2) {
  // If one is undefined, use the other, as we have nothing else.
  if (c1 == kUnknownCost) {
    return c2;
  } else if (c2 == kUnknownCost) {
    return c1;
  } else {
    return c1 + c2;
  }
}
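
// Worked examples: AddCost(10.0, 5.0) == 15.0, AddCost(kUnknownCost, 5.0)
// == 5.0, and AddCost(kUnknownCost, kUnknownCost) == kUnknownCost. This is
// also why kUnknownCost is a large negative sentinel rather than 0.0: a
// mistaken plain '+' keeps the result clearly negative instead of silently
// producing a plausible-looking cost.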

// The Mem_root_array must be allocated on a MEM_ROOT that lives at least for as
// long as the access path.
inline AccessPath *NewAppendAccessPath(
    THD *thd, Mem_root_array<AppendPathParameters> *children) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::APPEND;
  path->append().children = children;
  double num_output_rows = 0.0;
  for (const AppendPathParameters &child : *children) {
    path->set_cost(AddCost(path->cost(), child.path->cost()));
    path->set_init_cost(AddCost(path->init_cost(), child.path->init_cost()));
    path->set_init_once_cost(path->init_once_cost() +
                             child.path->init_once_cost());
    num_output_rows += child.path->num_output_rows();
  }
  path->set_num_output_rows(num_output_rows);
  return path;
}
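
// Example with illustrative numbers: appending two children with
// (cost, init_cost) = (10.0, 2.0) and (kUnknownCost, kUnknownCost) yields
// cost() == 10.0 and init_cost() == 2.0, since AddCost() treats kUnknownCost
// as "no estimate" rather than as a real addend.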

inline AccessPath *NewWindowAccessPath(THD *thd, AccessPath *child,
                                       Window *window,
                                       Temp_table_param *temp_table_param,
                                       int ref_slice, bool needs_buffering) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::WINDOW;
  path->window().child = child;
  path->window().window = window;
  path->window().temp_table = nullptr;
  path->window().temp_table_param = temp_table_param;
  path->window().ref_slice = ref_slice;
  path->window().needs_buffering = needs_buffering;
  path->set_num_output_rows(child->num_output_rows());
  return path;
}

inline AccessPath *NewWeedoutAccessPath(THD *thd, AccessPath *child,
                                        SJ_TMP_TABLE *weedout_table) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::WEEDOUT;
  path->weedout().child = child;
  path->weedout().weedout_table = weedout_table;
  path->weedout().tables_to_get_rowid_for =
      0;  // Must be handled by the caller.
  return path;
}

inline AccessPath *NewRemoveDuplicatesAccessPath(THD *thd, AccessPath *child,
                                                 Item **group_items,
                                                 int group_items_size) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::REMOVE_DUPLICATES;
  path->remove_duplicates().child = child;
  path->remove_duplicates().group_items = group_items;
  path->remove_duplicates().group_items_size = group_items_size;
  path->has_group_skip_scan = child->has_group_skip_scan;
  return path;
}

inline AccessPath *NewRemoveDuplicatesOnIndexAccessPath(
    THD *thd, AccessPath *child, TABLE *table, KEY *key,
    unsigned loosescan_key_len) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::REMOVE_DUPLICATES_ON_INDEX;
  path->remove_duplicates_on_index().child = child;
  path->remove_duplicates_on_index().table = table;
  path->remove_duplicates_on_index().key = key;
  path->remove_duplicates_on_index().loosescan_key_len = loosescan_key_len;
  path->has_group_skip_scan = child->has_group_skip_scan;
  return path;
}

inline AccessPath *NewAlternativeAccessPath(THD *thd, AccessPath *child,
                                            AccessPath *table_scan_path,
                                            Index_lookup *used_ref) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::ALTERNATIVE;
  path->alternative().table_scan_path = table_scan_path;
  path->alternative().child = child;
  path->alternative().used_ref = used_ref;
  return path;
}

inline AccessPath *NewInvalidatorAccessPath(THD *thd, AccessPath *child,
                                            const char *name) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::CACHE_INVALIDATOR;
  path->cache_invalidator().child = child;
  path->cache_invalidator().name = name;
  return path;
}

AccessPath *NewDeleteRowsAccessPath(THD *thd, AccessPath *child,
                                    table_map delete_tables,
                                    table_map immediate_tables);

AccessPath *NewUpdateRowsAccessPath(THD *thd, AccessPath *child,
                                    table_map update_tables,
                                    table_map immediate_tables);

/**
  Modifies "path" and the paths below it so that they provide row IDs for
  all tables.

  This also figures out how the row IDs should be retrieved for each table in
  the input to the path. If the handler of the table is positioned on the
  correct row while reading the input, handler::position() can be called to get
  the row ID from the handler. However, if the input iterator returns rows
  without keeping the position of the underlying handlers in sync, calling
  handler::position() will not be able to provide the row IDs. Specifically,
  hash join and BKA join do not keep the underlying handlers positioned on the
  right row. Therefore, this function will instruct every hash join or BKA join
  below "path" to maintain row IDs in the join buffer, and to update
  handler::ref in every input table for each row they return. Then "path" does
  not need to call handler::position() to get it (nor should it, since calling
  it would overwrite the correct row ID with a stale one).

  The tables on which "path" should call handler::position() are stored in a
  `tables_to_get_rowid_for` bitset in "path". For all the other tables, it can
  assume that handler::ref already contains the correct row ID.
 */
void FindTablesToGetRowidFor(AccessPath *path);

unique_ptr_destroy_only<RowIterator> CreateIteratorFromAccessPath(
    THD *thd, MEM_ROOT *mem_root, AccessPath *path, JOIN *join,
    bool eligible_for_batch_mode);

// A short form of CreateIteratorFromAccessPath() that implicitly uses the THD's
// MEM_ROOT for storage, which is nearly always what you want. (The only caller
// that does anything else is DynamicRangeIterator.)
inline unique_ptr_destroy_only<RowIterator> CreateIteratorFromAccessPath(
    THD *thd, AccessPath *path, JOIN *join, bool eligible_for_batch_mode) {
  return CreateIteratorFromAccessPath(thd, thd->mem_root, path, join,
                                      eligible_for_batch_mode);
}
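
// Usage sketch (hypothetical executor loop) for the short form above. Read()
// returns 0 on success, -1 on end of rows and 1 on error, as documented on
// RowIterator in row_iterator.h.
//
//   unique_ptr_destroy_only<RowIterator> it = CreateIteratorFromAccessPath(
//       thd, path, join, /*eligible_for_batch_mode=*/true);
//   if (it == nullptr || it->Init()) return true;  // Error.
//   for (;;) {
//     const int err = it->Read();
//     if (err == -1) break;       // EOF.
//     if (err == 1) return true;  // Error.
//     // Row is available in the tables' record buffers.
//   }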

void SetCostOnTableAccessPath(const Cost_model_server &cost_model,
                              const POSITION *pos, bool is_after_filter,
                              AccessPath *path);

/**
  Return the TABLE* referred from 'path' if it is a basic access path,
  else a nullptr is returned. Temporary tables, such as those used by
  sorting, aggregate and subquery materialization are not returned.
*/
TABLE *GetBasicTable(const AccessPath *path);

/**
  Returns a map of all tables read when `path` or any of its children are
  executed. Only iterators that are part of the same query block as `path`
  are considered.

  If a table is read that doesn't have a map, specifically the temporary
  tables made as part of materialization within the same query block,
  RAND_TABLE_BIT will be set as a convention and none of that access path's
  children will be included in the map. In this case, the caller will need to
  manually go in and find said access path, to ask it for its TABLE object.

  If include_pruned_tables = true, tables that are hidden under a ZERO_ROWS
  access path (ie., pruned away due to impossible join conditions) will be
  included in the map. This is normally what you want, as those tables need to
  be included whenever you store NULL flags and the likes, but if you don't
  want them (perhaps to specifically check for conditions referring to pruned
  tables), you can set it to false.
 */
table_map GetUsedTableMap(const AccessPath *path, bool include_pruned_tables);
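
// Usage sketch (hypothetical): testing whether 'path' reads a given table.
// Overlaps() is from sql/join_optimizer/bit_utils.h; 't1' is a Table_ref*.
//
//   const table_map used =
//       GetUsedTableMap(path, /*include_pruned_tables=*/true);
//   const bool reads_t1 = Overlaps(used, t1->map());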

/**
  Find the list of all tables used by this root, stopping at materializations.
  Used for knowing which tables to sort.
 */
Mem_root_array<TABLE *> CollectTables(THD *thd, AccessPath *root_path);

/**
  For each access path in the (sub)tree rooted at “path”, expand any use of
  “filter_predicates” into newly-inserted FILTER access paths, using the given
  predicate list. This is used after finding an optimal set of access paths,
  to normalize the tree so that the remaining consumers do not need to worry
  about filter_predicates and cost_before_filter.

  “join” is the join that “path” is part of.
 */
void ExpandFilterAccessPaths(THD *thd, AccessPath *path, const JOIN *join,
                             const Mem_root_array<Predicate> &predicates,
                             unsigned num_where_predicates);

/**
  Extracts the Item expression from the given “filter_predicates” corresponding
  to the given “mask”.
 */
Item *ConditionFromFilterPredicates(const Mem_root_array<Predicate> &predicates,
                                    OverflowBitset mask,
                                    int num_where_predicates);

/// Like ExpandFilterAccessPaths(), but expands only the single access path
/// at “path”.
void ExpandSingleFilterAccessPath(THD *thd, AccessPath *path, const JOIN *join,
                                  const Mem_root_array<Predicate> &predicates,
                                  unsigned num_where_predicates);

/// Returns the tables that are part of a hash join.
table_map GetHashJoinTables(AccessPath *path);

1916/**
1917 Get the conditions to put into the extra conditions of the HashJoinIterator.
1918 This includes the non-equijoin conditions, as well as any equijoin conditions
1919 on columns that are too big to include in the hash table. (The old optimizer
1920 handles equijoin conditions on long columns elsewhere, so the last part only
1921 applies to the hypergraph optimizer.)
1922
1923 @param mem_root The root on which to allocate memory, if needed.
1924 @param using_hypergraph_optimizer True if using the hypergraph optimizer.
1925 @param equijoin_conditions All the equijoin conditions of the join.
1926 @param other_conditions All the non-equijoin conditions of the join.
1927
1928 @return All the conditions to evaluate as "extra conditions" in
1929 HashJoinIterator, or nullptr on OOM.
1930 */
1931const Mem_root_array<Item *> *GetExtraHashJoinConditions(
1932    MEM_ROOT *mem_root, bool using_hypergraph_optimizer,
1933    const std::vector<HashJoinCondition> &equijoin_conditions,
1934    const Mem_root_array<Item *> &other_conditions);
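Since nullptr signals OOM, callers are expected to propagate the error, e.g. (sketch; the surrounding names are assumptions):

  const Mem_root_array<Item *> *extra_conditions = GetExtraHashJoinConditions(
      thd->mem_root, using_hypergraph_optimizer, equijoin_conditions,
      other_conditions);
  if (extra_conditions == nullptr) return true;  // Propagate OOM to caller.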
1935
1936/**
1937 Update status variables which count how many scans of various types are used
1938 in a query plan.
1939
1940 The following status variables are updated: Select_scan, Select_full_join,
1941 Select_range, Select_full_range_join, Select_range_check. They are also stored
1942 as performance schema statement events with the same names.
1943
1944 In addition, the performance schema statement events NO_INDEX_USED and
1945 NO_GOOD_INDEX_USED are updated, if appropriate.
1946 */
1947void CollectStatusVariables(THD *thd, const JOIN *top_join,
1948 const AccessPath &top_path);
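This is meant to be called once per statement on the completed plan, e.g. (sketch; root_access_path() is JOIN's accessor for the plan root):

  CollectStatusVariables(thd, join, *join->root_access_path());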
1949
1950#endif // SQL_JOIN_OPTIMIZER_ACCESS_PATH_H