MySQL 8.3.0 — Source Code Documentation
File listing: access_path.h (sql/join_optimizer)
Go to the documentation of this file.
1/* Copyright (c) 2020, 2023, Oracle and/or its affiliates.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License, version 2.0,
5 as published by the Free Software Foundation.
6
7 This program is also distributed with certain software (including
8 but not limited to OpenSSL) that is licensed under separate terms,
9 as designated in a particular file or component or in included license
10 documentation. The authors of MySQL hereby grant you an additional
11 permission to link the program and your derivative works with the
12 separately licensed software that they have included with MySQL.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License, version 2.0, for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
22
23#ifndef SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
24#define SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
25
26#include <assert.h>
27#include <stdint.h>
28#include <cstddef>
29#include <type_traits>
30#include <utility>
31#include <vector>
32
33#include "my_alloc.h"
34#include "my_base.h"
35#include "my_table_map.h"
36#include "sql/item.h"
37// IWYU suggests removing row_iterator.h, but then the inlined short form of
38// CreateIteratorFromAccessPath() fails to compile. So use a pragma to keep it.
39#include "sql/iterators/row_iterator.h" // IWYU pragma: keep
44#include "sql/join_type.h"
45#include "sql/mem_root_array.h"
46#include "sql/olap.h"
47#include "sql/sql_class.h"
48#include "sql/table.h"
49
51class Filesort;
53class Item_func_match;
54class JOIN;
55class KEY;
57class QEP_TAB;
58class QUICK_RANGE;
59class SJ_TMP_TABLE;
60class Table_function;
62class Window;
63struct AccessPath;
66struct Index_lookup;
67struct KEY_PART;
68struct POSITION;
70
71/**
72 A specification that two specific relational expressions
73 (e.g., two tables, or a table and a join between two other tables)
74 should be joined together. The actual join conditions, if any,
75 live inside the “expr” object, as does the join type etc.
76 */
80
81 // If this join is made using a hash join, estimates the width
82 // of each row as stored in the hash table, in bytes.
84
85 // The set of (additional) functional dependencies that are active
86 // after this join predicate has been applied. E.g. if we're joining
87 // on t1.x = t2.x, there will be a bit for that functional dependency.
88 // We don't currently support more complex join conditions, but there's
89 // no conceptual reason why we couldn't, e.g. a join on a = b + c
90 // could give rise to the FD {b, c} → a and possibly even {a, b} → c
91 // or {a, c} → b.
92 //
93 // Used in the processing of interesting orders.
95
96 // A less compact form of functional_dependencies, used during building
97 // (FunctionalDependencySet bitmaps are only available after all functional
98 // indexes have been collected and Build() has been called).
100
101 // A semijoin on the following format:
102 //
103 // SELECT ... FROM t1 WHERE EXISTS
104 // (SELECT ... FROM t2 WHERE t1.f1=t2.f2 AND t1.f3=t2.f4 ... t1.fn=t2.fm)
105 //
106 // may be transformed into an equivalent inner join:
107 //
108 // SELECT ... FROM (SELECT DISTINCT f2, f4...fm FROM t2) d JOIN t1
109 // ON t1.f1=d.f2 AND t1.f3=d.f4 ... t1.fn=d.fm
110 //
111 // If this is a suitable semijoin: This field will identify the the
112 // grouping given by (f2, f4..fm). (@see
113 // LogicalOrderings::RemapOrderingIndex() for a description of how this
114 // value can be mapped to an actual ordering). The join
115 // optimizer will then consider deduplicating on it and applying the
116 // above transform. If no such grouping was found, this field will be -1.
118
119 // Same as ordering_idx_needed_for_semijoin_rewrite, but given to the
120 // RemoveDuplicatesIterator for doing the actual grouping. Allocated
121 // on the MEM_ROOT. Can be empty, in which case a LIMIT 1 would do.
122 Item **semijoin_group = nullptr;
124};
125
126/**
127 A filter of some sort that is not a join condition (those are stored
128 in JoinPredicate objects). AND conditions are typically split up into
129 multiple Predicates.
130 */
131struct Predicate {
133
134 // condition->used_tables(), converted to a NodeMap.
136
137 // tables referred to by the condition, plus any tables whose values
138 // can null any of those tables. (Even when reordering outer joins,
139 // at least one of those tables will still be present on the
140 // left-hand side of the outer join, so this is sufficient.)
141 //
142 // As a special case, we allow setting RAND_TABLE_BIT, even though it
143 // is normally part of a table_map, not a NodeMap.
145
147
148 // Whether this predicate is a join condition after all; it was promoted
149 // to a WHERE predicate since it was part of a cycle (see the comment in
150 // AddCycleEdges()). If it is, it is usually ignored so that we don't
151 // double-apply join conditions -- but if the join in question was not
152 // applied (because the cycle was broken at this point), the predicate
153 // would come into play. This is normally registered on the join itself
154 // (see RelationalExpression::join_predicate_bitmap), but having the bit
155 // on the predicate itself is used to avoid trying to push it down as a
156 // sargable predicate.
157 bool was_join_condition = false;
158
159 // If this is a join condition that came from a multiple equality,
160 // and we have decided to create a mesh from that multiple equality,
161 // returns the index of it into the “multiple_equalities” array
162 // in MakeJoinHypergraph(). (You don't actually need the array to
163 // use this; it's just an opaque index to deduplicate between different
164 // predicates.) Otherwise, -1.
166
167 // See the equivalent fields in JoinPredicate.
170
171 // The list of all subqueries referred to in this predicate, if any.
172 // The optimizer uses this to add their materialized/non-materialized
173 // costs when evaluating filters.
175};
176
180};
181
182/// To indicate that a row estimate is not yet made.
183constexpr double kUnknownRowCount = -1.0;
184
185/// To indicate that a cost estimate is not yet made. We use a large negative
186/// value to avoid getting a positive result if we by mistake add this to
187/// a real (positive) cost.
188constexpr double kUnknownCost = -1e12;
189
190/**
191 Access paths are a query planning structure that correspond 1:1 to iterators,
192 in that an access path contains pretty much exactly the information
193 needed to instantiate given iterator, plus some information that is only
194 needed during planning, such as costs. (The new join optimizer will extend
195 this somewhat in the future. Some iterators also need the query block,
196 ie., JOIN object, they are part of, but that is implicitly available when
197 constructing the tree.)
198
199 AccessPath objects build on a variant, ie., they can hold an access path of
200 any type (table scan, filter, hash join, sort, etc.), although only one at the
201 same time. Currently, they contain 32 bytes of base information that is common
202 to any access path (type identifier, costs, etc.), and then up to 40 bytes
203 that is type-specific (e.g. for a table scan, the TABLE object). It would be
204 nice if we could squeeze it down to 64 and fit a cache line exactly, but it
205 does not seem to be easy without fairly large contortions.
206
207 We could have solved this by inheritance, but the fixed-size design makes it
208 possible to replace an access path when a better one is found, without
209 introducing a new allocation, which will be important when using them as a
210 planning structure.
211 */
213 // Most of the members are declared with initializers, but bit fields cannot
214 // be declared with initializers until C++20, so we need to define a
215 // constructor to initialize those fields.
216 // TODO(khatlen): When we move to C++20, add initializers to the bit fields
217 // too, and use the default constructor generated by the compiler.
219 : count_examined_rows(false),
221#ifndef NDEBUG
222 ,
223 forced_by_dbug(false)
224#endif
225 {
226 }
227
228 enum Type : uint8_t {
229 // Basic access paths (those with no children, at least nominally).
230 // NOTE: When adding more paths to this section, also update GetBasicTable()
231 // to handle them.
250
251 // Basic access paths that don't correspond to a specific table.
258
259 // Joins.
264
265 // Composite access paths.
281
282 // Access paths that modify tables.
286
287 /// A general enum to describe the safety of a given operation.
288 /// Currently we only use this to describe row IDs, but it can easily
289 /// be reused for safety of updating a table we're reading from
290 /// (the Halloween problem), or just generally unreproducible results
291 /// (e.g. a TABLESAMPLE changing due to external factors).
292 ///
293 /// Less safe values have higher numerical values.
294 enum Safety : uint8_t {
295 /// The given operation is always safe on this access path.
296 SAFE = 0,
297
298 /// The given operation is safe if this access path is scanned once,
299 /// but not if it's scanned multiple times (e.g. used on the inner side
300 /// of a nested-loop join). A typical example of this is a derived table
301 /// or CTE that is rematerialized on each scan, so that references to
302 /// the old values (such as row IDs) are no longer valid.
304
305 /// The given operation is unsafe on this access path, no matter how many
306 /// or few times it's scanned. Often, it may help to materialize it
307 /// (assuming the materialization itself doesn't use the operation
308 /// in question).
309 UNSAFE = 2
310 };
311
312 /// Whether it is safe to get row IDs (for sorting) from this access path.
314
315 /// Whether this access path counts as one that scans a base table,
316 /// and thus should be counted towards examined_rows. It can sometimes
317 /// seem a bit arbitrary which iterators count towards examined_rows
318 /// and which ones do not, so the only canonical reference is the tests.
320
321 /// Whether this access path contains a GROUP_INDEX_SKIP_SCAN
323
324#ifndef NDEBUG
325 /// Whether this access path is forced preferred over all others by means
326 /// of a SET DEBUG force_subplan_0x... statement.
328#endif
329
330 /// For UPDATE and DELETE statements: The node index of a table which can be
331 /// updated or deleted from immediately as the rows are read from the
332 /// iterator, if this path is only read from once. -1 if there is no such
333 /// table in this path.
334 ///
335 /// Note that this is an index into CostingReceiver's array of nodes, and is
336 /// not necessarily equal to the table number within the query block given by
337 /// Table_ref::tableno().
338 ///
339 /// The table, if any, is currently always the outermost table in the path.
340 ///
341 /// It is possible to have plans where it would be safe to operate
342 /// "immediately" on more than one table. For example, if we do a merge join,
343 /// it is safe to perform immediate deletes on tables on the inner side of the
344 /// join, since both sides are read only once. (However, we currently do not
345 /// support merge joins.)
346 ///
347 /// Another possibility is when the outer table of a nested loop join is
348 /// guaranteed to return at most one row (typically, a unique index lookup
349 /// aka. eq_ref). Then it's safe to delete immediately from both sides of the
350 /// nested loop join. But we don't to this yet.
351 ///
352 /// Hash joins read both sides exactly once, However, with hash joins, the
353 /// scans on the inner tables are not positioned on the correct row when the
354 /// result of the join is returned, so the immediate delete logic will need to
355 /// be changed to reposition the underlying scans before doing the immediate
356 /// deletes. While this can be done, it makes the benefit of immediate deletes
357 /// less obvious for these tables, and it can also be a loss in some cases,
358 /// because we lose the deduplication provided by the Unique object used for
359 /// buffered deletes (the immediate deletes could end up spending time
360 /// repositioning to already deleted rows). So we currently don't attempt to
361 /// do immediate deletes from inner tables of hash joins either.
362 ///
363 /// The outer table of a hash join can be deleted from immediately if the
364 /// inner table fits in memory. If the hash join spills to disk, though,
365 /// neither the rows of the outer table nor the rows of the inner table come
366 /// out in the order of the underlying scan, so it is not safe in general to
367 /// perform immediate deletes on the outer table of a hash join.
368 ///
369 /// If support for immediate operations on multiple tables is added,
370 /// this member could be changed from a node index to a NodeMap.
372
373 /// Which ordering the rows produced by this path follow, if any
374 /// (see interesting_orders.h). This is really a LogicalOrderings::StateIndex,
375 /// but we don't want to add a dependency on interesting_orders.h from
376 /// this file, so we use the base type instead of the typedef here.
378
379 /// If an iterator has been instantiated for this access path, points to the
380 /// iterator. Used for constructing iterators that need to talk to each other
381 /// (e.g. for recursive CTEs, or BKA join), and also for locating timing
382 /// information in EXPLAIN ANALYZE queries.
384
// Read-only accessors for the planner's cost model. Costs are in the
// optimizer's abstract cost units; kUnknownCost means "not yet computed".
385 double cost() const { return m_cost; }
386
// Cost to initialize the path before reading rows; see the comment on the
// backing member for why a pure initialization cost is stored rather than
// the first-row cost that EXPLAIN prints.
387 double init_cost() const { return m_init_cost; }
388
// The part of init_cost that is paid only once per query block (e.g. a
// materialization with rematerialize=false); zero for most paths.
389 double init_once_cost() const { return m_init_once_cost; }
390
// Cost of this path disregarding any attached filter; identical to cost()
// when there is no filter.
391 double cost_before_filter() const { return m_cost_before_filter; }
392
// Set the total cost; must be nonnegative or the kUnknownCost sentinel.
393 void set_cost(double val) {
394 assert(val >= 0.0 || val == kUnknownCost);
395 m_cost = val;
396 }
397
// Set the initialization cost; same validity rule as set_cost().
398 void set_init_cost(double val) {
399 assert(val >= 0.0 || val == kUnknownCost);
400 m_init_cost = val;
401 }
402
// Set the once-per-query-block part of the initialization cost.
// Unlike the other cost setters, the kUnknownCost sentinel is not
// accepted here (the assert requires val >= 0.0).
403 void set_init_once_cost(double val) {
404 assert(val >= 0.0);
405 m_init_once_cost = val;
406 }
407
408 void set_cost_before_filter(double val) {
409 assert(val >= 0.0 || val == kUnknownCost);
411 }
412
413 /// Return the cost of scanning the given path for the second time
414 /// (or later) in the given query block. This is really the interesting
415 /// metric, not init_once_cost in itself, but since nearly all paths
416 /// have zero init_once_cost, storing that instead allows us to skip
417 /// a lot of repeated path->init_once_cost = path->init_cost calls
418 /// in the code.
419 double rescan_cost() const { return cost() - init_once_cost(); }
420
421 /// If no filter, identical to num_output_rows.
423
424 /// Bitmap of WHERE predicates that we are including on this access path,
425 /// referring to the “predicates” array internal to the join optimizer.
426 /// Since bit masks are much cheaper to deal with than creating Item
427 /// objects, and we don't invent new conditions during join optimization
428 /// (all of them are known when we begin optimization), we stick to
429 /// manipulating bit masks during optimization, saying which filters will be
430 /// applied at this node (a 1-bit means the filter will be applied here; if
431 /// there are multiple ones, they are ANDed together).
432 ///
433 /// This is used during join optimization only; before iterators are
434 /// created, we will add FILTER access paths to represent these instead,
435 /// removing the dependency on the array. Said FILTER paths are by
436 /// convention created with materialize_subqueries = false, since the by far
437 /// most common case is that there are no subqueries in the predicate.
438 /// In other words, if you wish to represent a filter with
439 /// materialize_subqueries = true, you will need to make an explicit FILTER
440 /// node.
441 ///
442 /// See also nested_loop_join().equijoin_predicates, which is for filters
443 /// being applied _before_ nested-loop joins, but is otherwise the same idea.
445
446 /// Bitmap of sargable join predicates that have already been applied
447 /// in this access path by means of an index lookup (ref access),
448 /// again referring to “predicates”, and thus should not be counted again
449 /// for selectivity. Note that the filter may need to be applied
450 /// nevertheless (especially in case of type conversions); see
451 /// subsumed_sargable_join_predicates.
452 ///
453 /// Since these refer to the same array as filter_predicates, they will
454 /// never overlap with filter_predicates, and so we can reuse the same
455 /// memory using an alias (a union would not be allowed, since OverflowBitset
456 /// is a class with non-trivial default constructor), even though the meaning
457 /// is entirely separate. If N = num_where_predicates in the hypergraph, then
458 /// bits 0..(N-1) belong to filter_predicates, and the rest to
459 /// applied_sargable_join_predicates.
461 return filter_predicates;
462 }
464 return filter_predicates;
465 }
466
467 /// Bitmap of WHERE predicates that touch tables we have joined in,
468 /// but that we could not apply yet (for instance because they reference
469 /// other tables, or because because we could not push them down into
470 /// the nullable side of outer joins). Used during planning only
471 /// (see filter_predicates).
473
474 /// Similar to applied_sargable_join_predicates, bitmap of sargable
475 /// join predicates that have been applied and will subsume the join
476 /// predicate entirely, ie., not only should the selectivity not be
477 /// double-counted, but the predicate itself is redundant and need not
478 /// be applied as a filter. (It is an error to have a bit set here but not
479 /// in applied_sargable_join_predicates.)
481 return delayed_predicates;
482 }
484 return delayed_predicates;
485 }
486
487 /// If nonzero, a bitmap of other tables whose joined-in rows must already be
488 /// loaded when rows from this access path are evaluated; that is, this
489 /// access path must be put on the inner side of a nested-loop join (or
490 /// multiple such joins) where the outer side includes all of the given
491 /// tables.
492 ///
493 /// The most obvious case for this is dependent tables in LATERAL, but a more
494 /// common case is when we have pushed join conditions referring to those
495 /// tables; e.g., if this access path represents t1 and we have a condition
496 /// t1.x=t2.x that is pushed down into an index lookup (ref access), t2 will
497 /// be set in this bitmap. We can still join in other tables, deferring t2,
498 /// but the bit(s) will then propagate, and we cannot be on the right side of
499 /// a hash join until parameter_tables is zero again. (Also see
500 /// DisallowParameterizedJoinPath() for when we disallow such deferring,
501 /// as an optimization.)
502 ///
503 /// As a special case, we allow setting RAND_TABLE_BIT, even though it
504 /// is normally part of a table_map, not a NodeMap. In this case, it specifies
505 /// that the access path is entirely noncachable, because it depends on
506 /// something nondeterministic or an outer reference, and thus can never be on
507 /// the right side of a hash join, ever.
509
510 /// Auxiliary data used by a secondary storage engine while processing the
511 /// access path during optimization and execution. The secondary storage
512 /// engine is free to store any useful information in this member, for example
513 /// extra statistics or cost estimates. The data pointed to is fully owned by
514 /// the secondary storage engine, and it is the responsibility of the
515 /// secondary engine to manage the memory and make sure it is properly
516 /// destroyed.
517 void *secondary_engine_data{nullptr};
518
519 // Accessors for the union below.
// Type-checked accessors for the TABLE_SCAN payload of the union;
// the assert guards against reading an inactive union member.
520 auto &table_scan() {
521 assert(type == TABLE_SCAN);
522 return u.table_scan;
523 }
524 const auto &table_scan() const {
525 assert(type == TABLE_SCAN);
526 return u.table_scan;
527 }
// Type-checked accessors for the INDEX_SCAN payload.
528 auto &index_scan() {
529 assert(type == INDEX_SCAN);
530 return u.index_scan;
531 }
532 const auto &index_scan() const {
533 assert(type == INDEX_SCAN);
534 return u.index_scan;
535 }
537 assert(type == INDEX_DISTANCE_SCAN);
538 return u.index_distance_scan;
539 }
540 const auto &index_distance_scan() const {
541 assert(type == INDEX_DISTANCE_SCAN);
542 return u.index_distance_scan;
543 }
// Type-checked accessors for the REF (index lookup) payload.
544 auto &ref() {
545 assert(type == REF);
546 return u.ref;
547 }
548 const auto &ref() const {
549 assert(type == REF);
550 return u.ref;
551 }
// Type-checked accessors for the REF_OR_NULL payload.
552 auto &ref_or_null() {
553 assert(type == REF_OR_NULL);
554 return u.ref_or_null;
555 }
556 const auto &ref_or_null() const {
557 assert(type == REF_OR_NULL);
558 return u.ref_or_null;
559 }
// Type-checked accessors for the EQ_REF (unique index lookup) payload.
560 auto &eq_ref() {
561 assert(type == EQ_REF);
562 return u.eq_ref;
563 }
564 const auto &eq_ref() const {
565 assert(type == EQ_REF);
566 return u.eq_ref;
567 }
569 assert(type == PUSHED_JOIN_REF);
570 return u.pushed_join_ref;
571 }
572 const auto &pushed_join_ref() const {
573 assert(type == PUSHED_JOIN_REF);
574 return u.pushed_join_ref;
575 }
577 assert(type == FULL_TEXT_SEARCH);
578 return u.full_text_search;
579 }
580 const auto &full_text_search() const {
581 assert(type == FULL_TEXT_SEARCH);
582 return u.full_text_search;
583 }
584 auto &const_table() {
585 assert(type == CONST_TABLE);
586 return u.const_table;
587 }
588 const auto &const_table() const {
589 assert(type == CONST_TABLE);
590 return u.const_table;
591 }
592 auto &mrr() {
593 assert(type == MRR);
594 return u.mrr;
595 }
596 const auto &mrr() const {
597 assert(type == MRR);
598 return u.mrr;
599 }
600 auto &follow_tail() {
601 assert(type == FOLLOW_TAIL);
602 return u.follow_tail;
603 }
604 const auto &follow_tail() const {
605 assert(type == FOLLOW_TAIL);
606 return u.follow_tail;
607 }
609 assert(type == INDEX_RANGE_SCAN);
610 return u.index_range_scan;
611 }
612 const auto &index_range_scan() const {
613 assert(type == INDEX_RANGE_SCAN);
614 return u.index_range_scan;
615 }
616 auto &index_merge() {
617 assert(type == INDEX_MERGE);
618 return u.index_merge;
619 }
620 const auto &index_merge() const {
621 assert(type == INDEX_MERGE);
622 return u.index_merge;
623 }
625 assert(type == ROWID_INTERSECTION);
626 return u.rowid_intersection;
627 }
628 const auto &rowid_intersection() const {
629 assert(type == ROWID_INTERSECTION);
630 return u.rowid_intersection;
631 }
632 auto &rowid_union() {
633 assert(type == ROWID_UNION);
634 return u.rowid_union;
635 }
636 const auto &rowid_union() const {
637 assert(type == ROWID_UNION);
638 return u.rowid_union;
639 }
641 assert(type == INDEX_SKIP_SCAN);
642 return u.index_skip_scan;
643 }
644 const auto &index_skip_scan() const {
645 assert(type == INDEX_SKIP_SCAN);
646 return u.index_skip_scan;
647 }
649 assert(type == GROUP_INDEX_SKIP_SCAN);
650 return u.group_index_skip_scan;
651 }
652 const auto &group_index_skip_scan() const {
653 assert(type == GROUP_INDEX_SKIP_SCAN);
654 return u.group_index_skip_scan;
655 }
658 return u.dynamic_index_range_scan;
659 }
660 const auto &dynamic_index_range_scan() const {
662 return u.dynamic_index_range_scan;
663 }
666 return u.materialized_table_function;
667 }
668 const auto &materialized_table_function() const {
670 return u.materialized_table_function;
671 }
673 assert(type == UNQUALIFIED_COUNT);
674 return u.unqualified_count;
675 }
676 const auto &unqualified_count() const {
677 assert(type == UNQUALIFIED_COUNT);
678 return u.unqualified_count;
679 }
681 assert(type == TABLE_VALUE_CONSTRUCTOR);
682 return u.table_value_constructor;
683 }
684 const auto &table_value_constructor() const {
685 assert(type == TABLE_VALUE_CONSTRUCTOR);
686 return u.table_value_constructor;
687 }
689 assert(type == FAKE_SINGLE_ROW);
690 return u.fake_single_row;
691 }
692 const auto &fake_single_row() const {
693 assert(type == FAKE_SINGLE_ROW);
694 return u.fake_single_row;
695 }
// Type-checked accessors for the ZERO_ROWS payload (a path statically
// known to produce no rows; see the comment in the union further down).
696 auto &zero_rows() {
697 assert(type == ZERO_ROWS);
698 return u.zero_rows;
699 }
700 const auto &zero_rows() const {
701 assert(type == ZERO_ROWS);
702 return u.zero_rows;
703 }
705 assert(type == ZERO_ROWS_AGGREGATED);
706 return u.zero_rows_aggregated;
707 }
708 const auto &zero_rows_aggregated() const {
709 assert(type == ZERO_ROWS_AGGREGATED);
710 return u.zero_rows_aggregated;
711 }
// Type-checked accessors for the HASH_JOIN payload.
712 auto &hash_join() {
713 assert(type == HASH_JOIN);
714 return u.hash_join;
715 }
716 const auto &hash_join() const {
717 assert(type == HASH_JOIN);
718 return u.hash_join;
719 }
// Type-checked accessors for the BKA (batched key access) join payload.
720 auto &bka_join() {
721 assert(type == BKA_JOIN);
722 return u.bka_join;
723 }
724 const auto &bka_join() const {
725 assert(type == BKA_JOIN);
726 return u.bka_join;
727 }
729 assert(type == NESTED_LOOP_JOIN);
730 return u.nested_loop_join;
731 }
732 const auto &nested_loop_join() const {
733 assert(type == NESTED_LOOP_JOIN);
734 return u.nested_loop_join;
735 }
738 return u.nested_loop_semijoin_with_duplicate_removal;
739 }
742 return u.nested_loop_semijoin_with_duplicate_removal;
743 }
// Type-checked accessors for the FILTER payload.
744 auto &filter() {
745 assert(type == FILTER);
746 return u.filter;
747 }
748 const auto &filter() const {
749 assert(type == FILTER);
750 return u.filter;
751 }
// Type-checked accessors for the SORT payload.
752 auto &sort() {
753 assert(type == SORT);
754 return u.sort;
755 }
756 const auto &sort() const {
757 assert(type == SORT);
758 return u.sort;
759 }
// Type-checked accessors for the AGGREGATE payload.
760 auto &aggregate() {
761 assert(type == AGGREGATE);
762 return u.aggregate;
763 }
764 const auto &aggregate() const {
765 assert(type == AGGREGATE);
766 return u.aggregate;
767 }
769 assert(type == TEMPTABLE_AGGREGATE);
770 return u.temptable_aggregate;
771 }
772 const auto &temptable_aggregate() const {
773 assert(type == TEMPTABLE_AGGREGATE);
774 return u.temptable_aggregate;
775 }
776 auto &limit_offset() {
777 assert(type == LIMIT_OFFSET);
778 return u.limit_offset;
779 }
780 const auto &limit_offset() const {
781 assert(type == LIMIT_OFFSET);
782 return u.limit_offset;
783 }
784 auto &stream() {
785 assert(type == STREAM);
786 return u.stream;
787 }
788 const auto &stream() const {
789 assert(type == STREAM);
790 return u.stream;
791 }
792 auto &materialize() {
793 assert(type == MATERIALIZE);
794 return u.materialize;
795 }
796 const auto &materialize() const {
797 assert(type == MATERIALIZE);
798 return u.materialize;
799 }
802 return u.materialize_information_schema_table;
803 }
806 return u.materialize_information_schema_table;
807 }
808 auto &append() {
809 assert(type == APPEND);
810 return u.append;
811 }
812 const auto &append() const {
813 assert(type == APPEND);
814 return u.append;
815 }
816 auto &window() {
817 assert(type == WINDOW);
818 return u.window;
819 }
820 const auto &window() const {
821 assert(type == WINDOW);
822 return u.window;
823 }
824 auto &weedout() {
825 assert(type == WEEDOUT);
826 return u.weedout;
827 }
828 const auto &weedout() const {
829 assert(type == WEEDOUT);
830 return u.weedout;
831 }
833 assert(type == REMOVE_DUPLICATES);
834 return u.remove_duplicates;
835 }
836 const auto &remove_duplicates() const {
837 assert(type == REMOVE_DUPLICATES);
838 return u.remove_duplicates;
839 }
842 return u.remove_duplicates_on_index;
843 }
844 const auto &remove_duplicates_on_index() const {
846 return u.remove_duplicates_on_index;
847 }
848 auto &alternative() {
849 assert(type == ALTERNATIVE);
850 return u.alternative;
851 }
852 const auto &alternative() const {
853 assert(type == ALTERNATIVE);
854 return u.alternative;
855 }
857 assert(type == CACHE_INVALIDATOR);
858 return u.cache_invalidator;
859 }
860 const auto &cache_invalidator() const {
861 assert(type == CACHE_INVALIDATOR);
862 return u.cache_invalidator;
863 }
// Type-checked accessors for the DELETE_ROWS payload (DELETE statements).
864 auto &delete_rows() {
865 assert(type == DELETE_ROWS);
866 return u.delete_rows;
867 }
868 const auto &delete_rows() const {
869 assert(type == DELETE_ROWS);
870 return u.delete_rows;
871 }
// Type-checked accessors for the UPDATE_ROWS payload (UPDATE statements).
872 auto &update_rows() {
873 assert(type == UPDATE_ROWS);
874 return u.update_rows;
875 }
876 const auto &update_rows() const {
877 assert(type == UPDATE_ROWS);
878 return u.update_rows;
879 }
880
// Expected number of output rows for this path; kUnknownRowCount (see the
// constant above) denotes "no estimate made yet".
881 double num_output_rows() const { return m_num_output_rows; }
882
// Set the output row estimate. Note that unlike the cost setters there is
// no assert here, so the kUnknownRowCount sentinel can be stored freely.
883 void set_num_output_rows(double val) { m_num_output_rows = val; }
884
885 private:
886 /// Expected number of output rows.
888
889 /// Expected cost to read all of this access path once.
891
892 /// Expected cost to initialize this access path; ie., cost to read
893 /// k out of N rows would be init_cost + (k/N) * (cost - init_cost).
894 /// Note that EXPLAIN prints out cost of reading the _first_ row
895 /// because it is easier for the user and also easier to measure in
896 /// EXPLAIN ANALYZE, but it is easier to do calculations with a pure
897 /// initialization cost, so that is what we use in this member.
898 /// kUnknownCost for unknown.
900
901 /// Of init_cost, how much of the initialization needs only to be done
902 /// once per query block. (This is a cost, not a proportion.)
903 /// Ie., if the access path can reuse some its initialization work
904 /// if Init() is called multiple times, this member will be nonzero.
905 /// A typical example is a materialized table with rematerialize=false;
906 /// the second time Init() is called, it's a no-op. Most paths will have
907 /// init_once_cost = 0.0, ie., repeated scans will cost the same.
908 /// We do not intend to use this field to model cache effects.
909 ///
910 /// This is currently not printed in EXPLAIN, only optimizer trace.
911 double m_init_once_cost{0.0};
912
913 /// If no filter, identical to cost. init_cost is always the same
914 /// (filters have zero initialization cost).
916
917 // We'd prefer if this could be an std::variant, but we don't have C++17 yet.
918 // It is private to force all access to be through the type-checking
919 // accessors.
920 //
921 // For information about the meaning of each value, see the corresponding
922 // row iterator constructors.
923 union {
924 struct {
927 struct {
928 TABLE *table;
929 int idx;
933 struct {
934 TABLE *table;
935 int idx;
937 bool reverse;
939 struct {
940 TABLE *table;
942 bool use_order;
943 bool reverse;
945 struct {
946 TABLE *table;
948 bool use_order;
950 struct {
951 TABLE *table;
954 struct {
955 TABLE *table;
957 bool use_order;
960 struct {
961 TABLE *table;
963 bool use_order;
967 struct {
968 TABLE *table;
971 struct {
972 TABLE *table;
978 struct {
979 TABLE *table;
981 struct {
982 // The key part(s) we are scanning on. Note that this may be an array.
983 // You can get the table we are working on by looking into
984 // used_key_parts[0].field->table (it is not stored directly, to avoid
985 // going over the AccessPath size limits).
987
988 // The actual ranges we are scanning over (originally derived from “key”).
989 // Not a Bounds_checked_array, to save 4 bytes on the length.
991 unsigned num_ranges;
992
993 unsigned mrr_flags;
994 unsigned mrr_buf_size;
995
996 // Which index (in the TABLE) we are scanning over, and how many of its
997 // key parts we are using.
998 unsigned index;
1000
1001 // If true, the scan can return rows in rowid order.
1003
1004 // If true, the scan _should_ return rows in rowid order.
1005 // Should only be set if can_be_used_for_ror == true.
1007
1008 // If true, this plan can be used for index merge scan.
1010
1011 // See row intersection for more details.
1013
1014 // Whether we are scanning over a geometry key part.
1015 bool geometry : 1;
1016
1017 // Whether we need a reverse scan. Only supported if geometry == false.
1018 bool reverse : 1;
1019
1020 // For a reverse scan, if we are using extended key parts. It is needed,
1021 // to set correct flags when retrieving records.
1024 struct {
1025 TABLE *table;
1030 struct {
1031 TABLE *table;
1033
1034 // Clustered primary key scan, if any.
1036
1037 bool forced_by_hint;
1040
1041 // If true, the first child scan should reuse table->file instead of
1042 // creating its own. This is true if the intersection is the topmost
1043 // range scan, but _not_ if it's below a union. (The reasons for this
1044 // are unknown.) It can also be negated by logic involving
1045 // retrieve_full_rows and is_covering, again for unknown reasons.
1046 //
1047 // This is not only for performance; multi-table delete has a hidden
1048 // dependency on this behavior when running against certain types of
1049 // tables (e.g. MyISAM), as it assumes table->file is correctly positioned
1050 // when deleting (and not all table types can transfer the position of one
1051 // handler to another by using position()).
1052 bool reuse_handler;
1053
1054 // true if no row retrieval phase is necessary.
1057 struct {
1058 TABLE *table;
1060 bool forced_by_hint;
1062 struct {
1063 TABLE *table;
1064 unsigned index;
1065 unsigned num_used_key_parts;
1066 bool forced_by_hint;
1067
1068 // Large, and has nontrivial destructors, so split out into
1069 // its own allocation.
1072 struct {
1073 TABLE *table;
1074 unsigned index;
1075 unsigned num_used_key_parts;
1076 bool forced_by_hint;
1077
1078 // Large, so split out into its own allocation.
1081 struct {
1082 TABLE *table;
1083 QEP_TAB *qep_tab; // Used only for buffering.
1085 struct {
1086 TABLE *table;
1090 struct {
1092
1093 struct {
1096 struct {
1097 // No members.
1099 struct {
1100 // The child is optional. It is only used for keeping track of which
1101 // tables are pruned away by this path, and it is only needed when this
1102 // path is on the inner side of an outer join. See ZeroRowsIterator for
1103 // details. The child of a ZERO_ROWS access path will not be visited by
1104 // WalkAccessPaths(). It will be visited by WalkTablesUnderAccessPath()
1105 // only if called with include_pruned_tables = true. No iterator is
1106 // created for the child, and the child is not shown by EXPLAIN.
1108 // Used for EXPLAIN only.
1109 // TODO(sgunders): make an enum.
1110 const char *cause;
1112 struct {
1113 // Used for EXPLAIN only.
1114 // TODO(sgunders): make an enum.
1115 const char *cause;
1117
1118 struct {
1122 bool store_rowids; // Whether we are below a weedout or not.
1126 struct {
1131 bool store_rowids; // Whether we are below a weedout or not.
1134 struct {
1136 JoinType join_type; // Somewhat redundant wrt. join_predicate.
1140
1141 // Equijoin filters to apply before the join, if any.
1142 // Indexes into join_predicate->expr->equijoin_conditions.
1143 // Non-equijoin conditions are always applied.
1144 // If already_expanded_predicates is true, do not re-expand.
1146
1147 // NOTE: Due to the nontrivial constructor on equijoin_predicates,
1148 // this struct needs an initializer, or the union would not be
1149 // default-constructible. If we need more than one union member
1150 // with such an initializer, we would probably need to change
1151 // equijoin_predicates into a uint64_t type-punned to an OverflowBitset.
1152 } nested_loop_join = {nullptr, nullptr, JoinType::INNER, false, false,
1153 nullptr, {}};
1154 struct {
1156 const TABLE *table;
1158 size_t key_len;
1160
1161 struct {
1164
1165 // This parameter, unlike nearly all others, is not passed to the the
1166 // actual iterator. Instead, if true, it signifies that when creating
1167 // the iterator, all materializable subqueries in “condition” should be
1168 // materialized (with any in2exists condition removed first). In the
1169 // very rare case that there are two or more such subqueries, this is
1170 // an all-or-nothing decision, for simplicity.
1171 //
1172 // See FinalizeMaterializedSubqueries().
1175 struct {
1179
1180 // If filesort is nullptr: A new filesort will be created at the
1181 // end of optimization, using this order and flags. Otherwise: Only
1182 // used by EXPLAIN.
1189 struct {
1193 struct {
1196 TABLE *table;
1200 struct {
1202 ha_rows limit;
1206 // Only used when the LIMIT is on a UNION with SQL_CALC_FOUND_ROWS.
1207 // See Query_expression::send_records.
1210 struct {
1214 TABLE *table;
1216 int ref_slice;
1218 struct {
1219 // NOTE: The only legal access paths within table_path are
1220 // TABLE_SCAN, REF, REF_OR_NULL, EQ_REF, ALTERNATIVE,
1221 // CONST_TABLE (somewhat nonsensical), INDEX_SCAN and DYNAMIC_INDEX_SCAN
1223
1224 // Large, and has nontrivial destructors, so split out
1225 // into its own allocation.
1227 /** The total cost of executing the queries that we materialize.*/
1230 struct {
1233 Item *condition;
1235 struct {
1238 struct {
1243 int ref_slice;
1246 struct {
1251 struct {
1256 struct {
1258 TABLE *table;
1259 KEY *key;
1262 struct {
1264
1265 // For the ref.
1269 struct {
1271 const char *name;
1273 struct {
1278 struct {
1283 } u;
1284};
1285static_assert(std::is_trivially_destructible<AccessPath>::value,
1286 "AccessPath must be trivially destructible, as it is allocated "
1287 "on the MEM_ROOT and not wrapped in unique_ptr_destroy_only"
1288 "(because multiple candidates during planning could point to "
1289 "the same access paths, and refcounting would be expensive)");
1290static_assert(sizeof(AccessPath) <= 144,
1291 "We are creating a lot of access paths in the join "
1292 "optimizer, so be sure not to bloat it without noticing. "
1293 "(96 bytes for the base, 48 bytes for the variant.)");
1294
1295inline void CopyBasicProperties(const AccessPath &from, AccessPath *to) {
1297 to->set_cost(from.cost());
1298 to->set_init_cost(from.init_cost());
1301 to->safe_for_rowid = from.safe_for_rowid;
1302 to->ordering_state = from.ordering_state;
1304}
1305
1306// Trivial factory functions for all of the types of access paths above.
1307
1309 bool count_examined_rows) {
1310 AccessPath *path = new (thd->mem_root) AccessPath;
1312 path->count_examined_rows = count_examined_rows;
1313 path->table_scan().table = table;
1314 return path;
1315}
1316
1318 bool use_order, bool reverse,
1319 bool count_examined_rows) {
1320 AccessPath *path = new (thd->mem_root) AccessPath;
1322 path->count_examined_rows = count_examined_rows;
1323 path->index_scan().table = table;
1324 path->index_scan().idx = idx;
1325 path->index_scan().use_order = use_order;
1326 path->index_scan().reverse = reverse;
1327 return path;
1328}
1329
1331 bool use_order, bool reverse,
1332 bool count_examined_rows) {
1333 AccessPath *path = new (thd->mem_root) AccessPath;
1334 path->type = AccessPath::REF;
1335 path->count_examined_rows = count_examined_rows;
1336 path->ref().table = table;
1337 path->ref().ref = ref;
1338 path->ref().use_order = use_order;
1339 path->ref().reverse = reverse;
1340 return path;
1341}
1342
1344 Index_lookup *ref, bool use_order,
1345 bool count_examined_rows) {
1346 AccessPath *path = new (thd->mem_root) AccessPath;
1348 path->count_examined_rows = count_examined_rows;
1349 path->ref_or_null().table = table;
1350 path->ref_or_null().ref = ref;
1351 path->ref_or_null().use_order = use_order;
1352 return path;
1353}
1354
1356 bool count_examined_rows) {
1357 AccessPath *path = new (thd->mem_root) AccessPath;
1358 path->type = AccessPath::EQ_REF;
1359 path->count_examined_rows = count_examined_rows;
1360 path->eq_ref().table = table;
1361 path->eq_ref().ref = ref;
1362 return path;
1363}
1364
1366 Index_lookup *ref, bool use_order,
1367 bool is_unique,
1368 bool count_examined_rows) {
1369 AccessPath *path = new (thd->mem_root) AccessPath;
1371 path->count_examined_rows = count_examined_rows;
1372 path->pushed_join_ref().table = table;
1373 path->pushed_join_ref().ref = ref;
1374 path->pushed_join_ref().use_order = use_order;
1375 path->pushed_join_ref().is_unique = is_unique;
1376 return path;
1377}
1378
1381 Item_func_match *ft_func,
1382 bool use_order, bool use_limit,
1383 bool count_examined_rows) {
1384 AccessPath *path = new (thd->mem_root) AccessPath;
1386 path->count_examined_rows = count_examined_rows;
1387 path->full_text_search().table = table;
1388 path->full_text_search().ref = ref;
1389 path->full_text_search().use_order = use_order;
1390 path->full_text_search().use_limit = use_limit;
1391 path->full_text_search().ft_func = ft_func;
1392 return path;
1393}
1394
1397 bool count_examined_rows) {
1398 AccessPath *path = new (thd->mem_root) AccessPath;
1400 path->count_examined_rows = count_examined_rows;
1401 path->set_num_output_rows(1.0);
1402 path->set_cost(0.0);
1403 path->set_init_cost(0.0);
1404 path->set_init_once_cost(0.0);
1405 path->const_table().table = table;
1406 path->const_table().ref = ref;
1407 return path;
1408}
1409
1411 int mrr_flags) {
1412 AccessPath *path = new (thd->mem_root) AccessPath;
1413 path->type = AccessPath::MRR;
1414 path->mrr().table = table;
1415 path->mrr().ref = ref;
1416 path->mrr().mrr_flags = mrr_flags;
1417
1418 // This will be filled in when the BKA iterator is created.
1419 path->mrr().bka_path = nullptr;
1420
1421 return path;
1422}
1423
1425 bool count_examined_rows) {
1426 AccessPath *path = new (thd->mem_root) AccessPath;
1428 path->count_examined_rows = count_examined_rows;
1429 path->follow_tail().table = table;
1430 return path;
1431}
1432
1434 THD *thd, TABLE *table, QEP_TAB *qep_tab, bool count_examined_rows) {
1435 AccessPath *path = new (thd->mem_root) AccessPath;
1437 path->count_examined_rows = count_examined_rows;
1438 path->dynamic_index_range_scan().table = table;
1439 path->dynamic_index_range_scan().qep_tab = qep_tab;
1440 return path;
1441}
1442
1444 THD *thd, TABLE *table, Table_function *table_function,
1445 AccessPath *table_path) {
1446 AccessPath *path = new (thd->mem_root) AccessPath;
1448 path->materialized_table_function().table = table;
1449 path->materialized_table_function().table_function = table_function;
1450 path->materialized_table_function().table_path = table_path;
1451 return path;
1452}
1453
1455 AccessPath *path = new (thd->mem_root) AccessPath;
1457 return path;
1458}
1459
1461 const JOIN *join);
1462
1464 THD *thd, AccessPath *outer, AccessPath *inner, const TABLE *table,
1465 KEY *key, size_t key_len) {
1466 AccessPath *path = new (thd->mem_root) AccessPath;
1468 path->nested_loop_semijoin_with_duplicate_removal().outer = outer;
1469 path->nested_loop_semijoin_with_duplicate_removal().inner = inner;
1470 path->nested_loop_semijoin_with_duplicate_removal().table = table;
1471 path->nested_loop_semijoin_with_duplicate_removal().key = key;
1472 path->nested_loop_semijoin_with_duplicate_removal().key_len = key_len;
1473 return path;
1474}
1475
1477 Item *condition) {
1478 AccessPath *path = new (thd->mem_root) AccessPath;
1479 path->type = AccessPath::FILTER;
1480 path->filter().child = child;
1481 path->filter().condition = condition;
1482 path->filter().materialize_subqueries = false;
1483 path->has_group_skip_scan = child->has_group_skip_scan;
1484 return path;
1485}
1486
1487// Not inline, because it needs access to filesort internals
1488// (which are forward-declared in this file).
1490 ORDER *order, bool count_examined_rows);
1491
1493 olap_type olap) {
1494 AccessPath *path = new (thd->mem_root) AccessPath;
1496 path->aggregate().child = child;
1497 path->aggregate().olap = olap;
1498 path->has_group_skip_scan = child->has_group_skip_scan;
1499 return path;
1500}
1501
1503 THD *thd, AccessPath *subquery_path, Temp_table_param *temp_table_param,
1504 TABLE *table, AccessPath *table_path, int ref_slice) {
1505 AccessPath *path = new (thd->mem_root) AccessPath;
1507 path->temptable_aggregate().subquery_path = subquery_path;
1508 path->temptable_aggregate().temp_table_param = temp_table_param;
1509 path->temptable_aggregate().table = table;
1510 path->temptable_aggregate().table_path = table_path;
1511 path->temptable_aggregate().ref_slice = ref_slice;
1512 return path;
1513}
1514
1516 ha_rows limit, ha_rows offset,
1517 bool count_all_rows,
1518 bool reject_multiple_rows,
1519 ha_rows *send_records_override) {
1521 AccessPath *path = new (thd->mem_root) AccessPath;
1523 path->immediate_update_delete_table = child->immediate_update_delete_table;
1524 path->limit_offset().child = child;
1525 path->limit_offset().limit = limit;
1526 path->limit_offset().offset = offset;
1527 path->limit_offset().count_all_rows = count_all_rows;
1528 path->limit_offset().reject_multiple_rows = reject_multiple_rows;
1529 path->limit_offset().send_records_override = send_records_override;
1530 path->ordering_state = child->ordering_state;
1531 path->has_group_skip_scan = child->has_group_skip_scan;
1533 return path;
1534}
1535
1537 bool count_examined_rows) {
1538 AccessPath *path = new (thd->mem_root) AccessPath;
1540 path->count_examined_rows = count_examined_rows;
1541 path->set_num_output_rows(1.0);
1542 path->set_cost(0.0);
1543 path->set_init_cost(0.0);
1544 path->set_init_once_cost(0.0);
1545 return path;
1546}
1547
1549 const char *cause) {
1550 AccessPath *path = new (thd->mem_root) AccessPath;
1552 path->zero_rows().child = child;
1553 path->zero_rows().cause = cause;
1554 path->set_num_output_rows(0.0);
1555 path->set_cost(0.0);
1556 path->set_init_cost(0.0);
1557 path->set_init_once_cost(0.0);
1558 path->num_output_rows_before_filter = 0.0;
1559 path->set_cost_before_filter(0.0);
1560 return path;
1561}
1562
1563inline AccessPath *NewZeroRowsAccessPath(THD *thd, const char *cause) {
1564 return NewZeroRowsAccessPath(thd, /*child=*/nullptr, cause);
1565}
1566
1568 const char *cause) {
1569 AccessPath *path = new (thd->mem_root) AccessPath;
1571 path->zero_rows_aggregated().cause = cause;
1572 path->set_num_output_rows(1.0);
1573 path->set_cost(0.0);
1574 path->set_init_cost(0.0);
1575 return path;
1576}
1577
1579 JOIN *join,
1580 Temp_table_param *temp_table_param,
1581 TABLE *table, int ref_slice) {
1582 AccessPath *path = new (thd->mem_root) AccessPath;
1583 path->type = AccessPath::STREAM;
1584 path->stream().child = child;
1585 path->stream().join = join;
1586 path->stream().temp_table_param = temp_table_param;
1587 path->stream().table = table;
1588 path->stream().ref_slice = ref_slice;
1589 // Will be set later if we get a weedout access path as parent.
1590 path->stream().provide_rowid = false;
1591 path->has_group_skip_scan = child->has_group_skip_scan;
1592 return path;
1593}
1594
1597 JOIN *join, bool copy_items,
1598 Temp_table_param *temp_table_param) {
1599 assert(path != nullptr);
1601 MaterializePathParameters::Operand &operand = array[0];
1602 operand.subquery_path = path;
1603 operand.select_number = select_number;
1604 operand.join = join;
1606 operand.copy_items = copy_items;
1607 operand.temp_table_param = temp_table_param;
1608 return array;
1609}
1610
1614 AccessPath *table_path, Common_table_expr *cte, Query_expression *unit,
1615 int ref_slice, bool rematerialize, ha_rows limit_rows,
1616 bool reject_multiple_rows) {
1619 param->m_operands = std::move(operands);
1620 if (rematerialize) {
1621 // There's no point in adding invalidators if we're rematerializing
1622 // every time anyway.
1623 param->invalidators = nullptr;
1624 } else {
1625 param->invalidators = invalidators;
1626 }
1627 param->table = table;
1628 param->cte = cte;
1629 param->unit = unit;
1630 param->ref_slice = ref_slice;
1631 param->rematerialize = rematerialize;
1632 param->limit_rows = (table == nullptr || table->is_union_or_table()
1633 ? limit_rows
1634 :
1635 // INTERSECT, EXCEPT: Enforced by TableScanIterator,
1636 // see its constructor
1637 HA_POS_ERROR);
1638 param->reject_multiple_rows = reject_multiple_rows;
1639
1640#ifndef NDEBUG
1641 for (MaterializePathParameters::Operand &operand : param->m_operands) {
1642 assert(operand.subquery_path != nullptr);
1643 }
1644#endif
1645
1646 AccessPath *path = new (thd->mem_root) AccessPath;
1648 path->materialize().table_path = table_path;
1649 path->materialize().param = param;
1650 path->materialize().subquery_cost = kUnknownCost;
1651 if (rematerialize) {
1652 path->safe_for_rowid = AccessPath::SAFE_IF_SCANNED_ONCE;
1653 } else {
1654 // The default; this is just to be explicit in the code.
1655 path->safe_for_rowid = AccessPath::SAFE;
1656 }
1657 return path;
1658}
1659
1661 THD *thd, AccessPath *table_path, Table_ref *table_list, Item *condition) {
1662 AccessPath *path = new (thd->mem_root) AccessPath;
1664 path->materialize_information_schema_table().table_path = table_path;
1665 path->materialize_information_schema_table().table_list = table_list;
1666 path->materialize_information_schema_table().condition = condition;
1667 return path;
1668}
1669
1670/// Add path costs c1 and c2, but handle kUnknownCost correctly.
1671inline double AddCost(double c1, double c2) {
1672 // If one is undefined, use the other, as we have nothing else.
1673 if (c1 == kUnknownCost) {
1674 return c2;
1675 } else if (c2 == kUnknownCost) {
1676 return c1;
1677 } else {
1678 return c1 + c2;
1679 }
1680}
1681
1682// The Mem_root_array must be allocated on a MEM_ROOT that lives at least for as
1683// long as the access path.
1685 THD *thd, Mem_root_array<AppendPathParameters> *children) {
1686 AccessPath *path = new (thd->mem_root) AccessPath;
1687 path->type = AccessPath::APPEND;
1688 path->append().children = children;
1689 double num_output_rows = 0.0;
1690 for (const AppendPathParameters &child : *children) {
1691 path->set_cost(AddCost(path->cost(), child.path->cost()));
1692 path->set_init_cost(AddCost(path->init_cost(), child.path->init_cost()));
1693 path->set_init_once_cost(path->init_once_cost() +
1694 child.path->init_once_cost());
1695 num_output_rows += child.path->num_output_rows();
1696 }
1697 path->set_num_output_rows(num_output_rows);
1698 return path;
1699}
1700
1702 Window *window,
1703 Temp_table_param *temp_table_param,
1704 int ref_slice, bool needs_buffering) {
1705 AccessPath *path = new (thd->mem_root) AccessPath;
1706 path->type = AccessPath::WINDOW;
1707 path->window().child = child;
1708 path->window().window = window;
1709 path->window().temp_table = nullptr;
1710 path->window().temp_table_param = temp_table_param;
1711 path->window().ref_slice = ref_slice;
1712 path->window().needs_buffering = needs_buffering;
1713 path->set_num_output_rows(child->num_output_rows());
1714 return path;
1715}
1716
1718 SJ_TMP_TABLE *weedout_table) {
1719 AccessPath *path = new (thd->mem_root) AccessPath;
1720 path->type = AccessPath::WEEDOUT;
1721 path->weedout().child = child;
1722 path->weedout().weedout_table = weedout_table;
1723 path->weedout().tables_to_get_rowid_for =
1724 0; // Must be handled by the caller.
1725 return path;
1726}
1727
1729 Item **group_items,
1730 int group_items_size) {
1731 AccessPath *path = new (thd->mem_root) AccessPath;
1733 path->remove_duplicates().child = child;
1734 path->remove_duplicates().group_items = group_items;
1735 path->remove_duplicates().group_items_size = group_items_size;
1736 path->has_group_skip_scan = child->has_group_skip_scan;
1737 return path;
1738}
1739
1741 THD *thd, AccessPath *child, TABLE *table, KEY *key,
1742 unsigned loosescan_key_len) {
1743 AccessPath *path = new (thd->mem_root) AccessPath;
1745 path->remove_duplicates_on_index().child = child;
1746 path->remove_duplicates_on_index().table = table;
1747 path->remove_duplicates_on_index().key = key;
1748 path->remove_duplicates_on_index().loosescan_key_len = loosescan_key_len;
1749 path->has_group_skip_scan = child->has_group_skip_scan;
1750 return path;
1751}
1752
1754 AccessPath *table_scan_path,
1755 Index_lookup *used_ref) {
1756 AccessPath *path = new (thd->mem_root) AccessPath;
1758 path->alternative().table_scan_path = table_scan_path;
1759 path->alternative().child = child;
1760 path->alternative().used_ref = used_ref;
1761 return path;
1762}
1763
1765 const char *name) {
1766 AccessPath *path = new (thd->mem_root) AccessPath;
1768 path->cache_invalidator().child = child;
1769 path->cache_invalidator().name = name;
1770 return path;
1771}
1772
1775 table_map immediate_tables);
1776
1779 table_map immediate_tables);
1780
1781/**
1782 Modifies "path" and the paths below it so that they provide row IDs for
1783 all tables.
1784 */
1786
1789 bool eligible_for_batch_mode);
1790
1791// A short form of CreateIteratorFromAccessPath() that implicitly uses the THD's
1792// MEM_ROOT for storage, which is nearly always what you want. (The only caller
1793// that does anything else is DynamicRangeIterator.)
1795 THD *thd, AccessPath *path, JOIN *join, bool eligible_for_batch_mode) {
1797 eligible_for_batch_mode);
1798}
1799
1800void SetCostOnTableAccessPath(const Cost_model_server &cost_model,
1801 const POSITION *pos, bool is_after_filter,
1802 AccessPath *path);
1803
1804/**
1805 Return the TABLE* referred from 'path' if it is a basic access path,
1806 else a nullptr is returned. Temporary tables, such as those used by
1807 sorting, aggregate and subquery materialization are not returned.
1808*/
1810
1811/**
1812 Returns a map of all tables read when `path` or any of its children are
1813 executed. Only iterators that are part of the same query block as `path`
1814 are considered.
1815
1816 If a table is read that doesn't have a map, specifically the temporary
1817 tables made as part of materialization within the same query block,
1818 RAND_TABLE_BIT will be set as a convention and none of that access path's
1819 children will be included in the map. In this case, the caller will need to
1820 manually go in and find said access path, to ask it for its TABLE object.
1821
1822 If include_pruned_tables = true, tables that are hidden under a ZERO_ROWS
1823 access path (ie., pruned away due to impossible join conditions) will be
1824 included in the map. This is normally what you want, as those tables need to
1825 be included whenever you store NULL flags and the likes, but if you don't
1826 want them (perhaps to specifically check for conditions referring to pruned
1827 tables), you can set it to false.
1828 */
1829table_map GetUsedTableMap(const AccessPath *path, bool include_pruned_tables);
1830
1831/**
1832 Find the list of all tables used by this root, stopping at materializations.
1833 Used for knowing which tables to sort.
1834 */
1836
1837/**
1838 For each access path in the (sub)tree rooted at “path”, expand any use of
1839 “filter_predicates” into newly-inserted FILTER access paths, using the given
1840 predicate list. This is used after finding an optimal set of access paths,
1841 to normalize the tree so that the remaining consumers do not need to worry
1842 about filter_predicates and cost_before_filter.
1843
1844 “join” is the join that “path” is part of.
1845 */
1847 const Mem_root_array<Predicate> &predicates,
1848 unsigned num_where_predicates);
1849
1850/**
1851 Extracts the Item expression from the given “filter_predicates” corresponding
1852 to the given “mask”.
1853 */
1856 int num_where_predicates);
1857
1858/// Like ExpandFilterAccessPaths(), but expands only the single access path
1859/// at “path”.
1861 const Mem_root_array<Predicate> &predicates,
1862 unsigned num_where_predicates);
1863
1864/// Returns the tables that are part of a hash join.
1866
1867/**
1868 Get the conditions to put into the extra conditions of the HashJoinIterator.
1869 This includes the non-equijoin conditions, as well as any equijoin conditions
1870 on columns that are too big to include in the hash table. (The old optimizer
1871 handles equijoin conditions on long columns elsewhere, so the last part only
1872 applies to the hypergraph optimizer.)
1873
1874 @param mem_root The root on which to allocate memory, if needed.
1875 @param using_hypergraph_optimizer True if using the hypergraph optimizer.
1876 @param equijoin_conditions All the equijoin conditions of the join.
1877 @param other_conditions All the non-equijoin conditions of the join.
1878
1879 @return All the conditions to evaluate as "extra conditions" in
1880 HashJoinIterator, or nullptr on OOM.
1881 */
1883 MEM_ROOT *mem_root, bool using_hypergraph_optimizer,
1884 const std::vector<HashJoinCondition> &equijoin_conditions,
1885 const Mem_root_array<Item *> &other_conditions);
1886
1887#endif // SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
constexpr double kUnknownCost
To indicate that a cost estimate is not yet made.
Definition: access_path.h:188
AccessPath * NewStreamingAccessPath(THD *thd, AccessPath *child, JOIN *join, Temp_table_param *temp_table_param, TABLE *table, int ref_slice)
Definition: access_path.h:1578
AccessPath * NewRemoveDuplicatesAccessPath(THD *thd, AccessPath *child, Item **group_items, int group_items_size)
Definition: access_path.h:1728
AccessPath * NewInvalidatorAccessPath(THD *thd, AccessPath *child, const char *name)
Definition: access_path.h:1764
AccessPath * NewRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool reverse, bool count_examined_rows)
Definition: access_path.h:1330
AccessPath * NewDeleteRowsAccessPath(THD *thd, AccessPath *child, table_map delete_tables, table_map immediate_tables)
Definition: access_path.cc:121
void CopyBasicProperties(const AccessPath &from, AccessPath *to)
Definition: access_path.h:1295
void ExpandSingleFilterAccessPath(THD *thd, AccessPath *path, const JOIN *join, const Mem_root_array< Predicate > &predicates, unsigned num_where_predicates)
Like ExpandFilterAccessPaths(), but expands only the single access path at “path”.
Definition: access_path.cc:1489
AccessPath * NewFullTextSearchAccessPath(THD *thd, TABLE *table, Index_lookup *ref, Item_func_match *ft_func, bool use_order, bool use_limit, bool count_examined_rows)
Definition: access_path.h:1379
AccessPath * NewTemptableAggregateAccessPath(THD *thd, AccessPath *subquery_path, Temp_table_param *temp_table_param, TABLE *table, AccessPath *table_path, int ref_slice)
Definition: access_path.h:1502
table_map GetHashJoinTables(AccessPath *path)
Returns the tables that are part of a hash join.
Definition: access_path.cc:1616
AccessPath * NewDynamicIndexRangeScanAccessPath(THD *thd, TABLE *table, QEP_TAB *qep_tab, bool count_examined_rows)
Definition: access_path.h:1433
AccessPath * NewZeroRowsAggregatedAccessPath(THD *thd, const char *cause)
Definition: access_path.h:1567
AccessPath * NewAlternativeAccessPath(THD *thd, AccessPath *child, AccessPath *table_scan_path, Index_lookup *used_ref)
Definition: access_path.h:1753
AccessPath * NewUpdateRowsAccessPath(THD *thd, AccessPath *child, table_map delete_tables, table_map immediate_tables)
Definition: access_path.cc:133
AccessPath * NewMRRAccessPath(THD *thd, TABLE *table, Index_lookup *ref, int mrr_flags)
Definition: access_path.h:1410
table_map GetUsedTableMap(const AccessPath *path, bool include_pruned_tables)
Returns a map of all tables read when path or any of its children are executed.
Definition: access_path.cc:269
AccessPath * NewAppendAccessPath(THD *thd, Mem_root_array< AppendPathParameters > *children)
Definition: access_path.h:1684
AccessPath * NewNestedLoopSemiJoinWithDuplicateRemovalAccessPath(THD *thd, AccessPath *outer, AccessPath *inner, const TABLE *table, KEY *key, size_t key_len)
Definition: access_path.h:1463
AccessPath * NewRemoveDuplicatesOnIndexAccessPath(THD *thd, AccessPath *child, TABLE *table, KEY *key, unsigned loosescan_key_len)
Definition: access_path.h:1740
AccessPath * NewFakeSingleRowAccessPath(THD *thd, bool count_examined_rows)
Definition: access_path.h:1536
AccessPath * NewTableValueConstructorAccessPath(const THD *thd, const JOIN *join)
Definition: access_path.cc:165
Item * ConditionFromFilterPredicates(const Mem_root_array< Predicate > &predicates, OverflowBitset mask, int num_where_predicates)
Extracts the Item expression from the given “filter_predicates” corresponding to the given “mask”.
Definition: access_path.cc:1478
AccessPath * NewFilterAccessPath(THD *thd, AccessPath *child, Item *condition)
Definition: access_path.h:1476
constexpr double kUnknownRowCount
To indicate that a row estimate is not yet made.
Definition: access_path.h:183
AccessPath * NewSortAccessPath(THD *thd, AccessPath *child, Filesort *filesort, ORDER *order, bool count_examined_rows)
Definition: access_path.cc:86
AccessPath * NewMaterializedTableFunctionAccessPath(THD *thd, TABLE *table, Table_function *table_function, AccessPath *table_path)
Definition: access_path.h:1443
AccessPath * NewMaterializeAccessPath(THD *thd, Mem_root_array< MaterializePathParameters::Operand > operands, Mem_root_array< const AccessPath * > *invalidators, TABLE *table, AccessPath *table_path, Common_table_expr *cte, Query_expression *unit, int ref_slice, bool rematerialize, ha_rows limit_rows, bool reject_multiple_rows)
Definition: access_path.h:1611
const Mem_root_array< Item * > * GetExtraHashJoinConditions(MEM_ROOT *mem_root, bool using_hypergraph_optimizer, const std::vector< HashJoinCondition > &equijoin_conditions, const Mem_root_array< Item * > &other_conditions)
Get the conditions to put into the extra conditions of the HashJoinIterator.
Mem_root_array< TABLE * > CollectTables(THD *thd, AccessPath *root_path)
Find the list of all tables used by this root, stopping at materializations.
Definition: access_path.cc:300
AccessPath * NewTableScanAccessPath(THD *thd, TABLE *table, bool count_examined_rows)
Definition: access_path.h:1308
AccessPath * NewAggregateAccessPath(THD *thd, AccessPath *child, olap_type olap)
Definition: access_path.h:1492
void ExpandFilterAccessPaths(THD *thd, AccessPath *path, const JOIN *join, const Mem_root_array< Predicate > &predicates, unsigned num_where_predicates)
For each access path in the (sub)tree rooted at “path”, expand any use of “filter_predicates” into ne...
Definition: access_path.cc:1604
double AddCost(double c1, double c2)
Add path costs c1 and c2, but handle kUnknownCost correctly.
Definition: access_path.h:1671
AccessPath * NewMaterializeInformationSchemaTableAccessPath(THD *thd, AccessPath *table_path, Table_ref *table_list, Item *condition)
Definition: access_path.h:1660
void FindTablesToGetRowidFor(AccessPath *path)
Modifies "path" and the paths below it so that they provide row IDs for all tables.
Definition: access_path.cc:1334
TABLE * GetBasicTable(const AccessPath *path)
Return the TABLE* referred from 'path' if it is a basic access path, else a nullptr is returned.
Definition: access_path.cc:214
AccessPath * NewRefOrNullAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool count_examined_rows)
Definition: access_path.h:1343
AccessPath * NewLimitOffsetAccessPath(THD *thd, AccessPath *child, ha_rows limit, ha_rows offset, bool count_all_rows, bool reject_multiple_rows, ha_rows *send_records_override)
Definition: access_path.h:1515
AccessPath * NewEQRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool count_examined_rows)
Definition: access_path.h:1355
AccessPath * NewWindowAccessPath(THD *thd, AccessPath *child, Window *window, Temp_table_param *temp_table_param, int ref_slice, bool needs_buffering)
Definition: access_path.h:1701
AccessPath * NewFollowTailAccessPath(THD *thd, TABLE *table, bool count_examined_rows)
Definition: access_path.h:1424
AccessPath * NewConstTableAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool count_examined_rows)
Definition: access_path.h:1395
unique_ptr_destroy_only< RowIterator > CreateIteratorFromAccessPath(THD *thd, MEM_ROOT *mem_root, AccessPath *path, JOIN *join, bool eligible_for_batch_mode)
Definition: access_path.cc:485
AccessPath * NewWeedoutAccessPath(THD *thd, AccessPath *child, SJ_TMP_TABLE *weedout_table)
Definition: access_path.h:1717
AccessPath * NewPushedJoinRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool is_unique, bool count_examined_rows)
Definition: access_path.h:1365
AccessPath * NewZeroRowsAccessPath(THD *thd, AccessPath *child, const char *cause)
Definition: access_path.h:1548
AccessPath * NewUnqualifiedCountAccessPath(THD *thd)
Definition: access_path.h:1454
AccessPath * NewIndexScanAccessPath(THD *thd, TABLE *table, int idx, bool use_order, bool reverse, bool count_examined_rows)
Definition: access_path.h:1317
Mem_root_array< MaterializePathParameters::Operand > SingleMaterializeQueryBlock(THD *thd, AccessPath *path, int select_number, JOIN *join, bool copy_items, Temp_table_param *temp_table_param)
Definition: access_path.h:1596
After parsing, a Common Table Expression is accessed through a Table_ref.
Definition: table.h:4390
API for getting cost estimates for server operations that are not directly related to a table object.
Definition: opt_costmodel.h:53
Sorting related info.
Definition: filesort.h:51
A class that represents a join condition in a hash join.
Definition: item_cmpfunc.h:86
Definition: item_func.h:3455
Base class that is used to represent any kind of expression in a relational query.
Definition: item.h:933
Definition: sql_optimizer.h:132
Definition: key.h:112
A typesafe replacement for DYNAMIC_ARRAY.
Definition: mem_root_array.h:425
Definition: overflow_bitset.h:77
Definition: sql_executor.h:253
Definition: range_optimizer.h:68
This class represents a query expression (one query block or several query blocks combined with UNION...
Definition: sql_lex.h:624
A context for reading through a single table using a chosen access method: index read,...
Definition: row_iterator.h:81
Definition: sql_executor.h:94
For each client connection we create a separate thread with THD serving as a thread/connection descri...
Definition: sql_lexer_thd.h:35
MEM_ROOT * mem_root
Definition: sql_lexer_thd.h:39
Class representing a table function.
Definition: table_function.h:52
Definition: table.h:2853
Object containing parameters used when creating and using temporary tables.
Definition: temp_table_param.h:94
Represents the (explicit) window of a SQL 2003 section 7.11 <window clause>, or the implicit (inlined...
Definition: window.h:104
static MEM_ROOT mem_root
Definition: client_plugin.cc:113
void EstimateLimitOffsetCost(AccessPath *path)
Estimate the costs and row count for a LIMIT_OFFSET AccessPath.
Definition: cost_model.cc:932
bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator, table_map tables_to_get_rowid_for, ha_rows num_rows_estimate, Filesort_info *fs_info, Sort_result *sort_result, ha_rows *found_rows)
Sort a table.
Definition: filesort.cc:366
void SetCostOnTableAccessPath(const Cost_model_server &cost_model, const POSITION *pos, bool is_after_filter, AccessPath *path)
Definition: sql_executor.cc:1865
std::bitset< kMaxSupportedFDs > FunctionalDependencySet
Definition: interesting_orders_defs.h:62
JoinType
Definition: join_type.h:27
static mi_bit_type mask[]
Definition: mi_packrec.cc:140
This file follows Google coding style, except for the name MEM_ROOT (which is kept for historical rea...
std::unique_ptr< T, Destroy_only< T > > unique_ptr_destroy_only
std::unique_ptr, but only destroying.
Definition: my_alloc.h:476
This file includes constants used by all storage engines.
my_off_t ha_rows
Definition: my_base.h:1140
#define HA_POS_ERROR
Definition: my_base.h:1142
uint64_t table_map
Definition: my_table_map.h:29
static char * path
Definition: mysqldump.cc:148
static PFS_engine_table_share_proxy table
Definition: pfs.cc:60
PT & ref(PT *tp)
Definition: tablespace_impl.cc:358
uint64_t NodeMap
Since our graphs can never have more than 61 tables, node sets and edge lists are implemented using 6...
Definition: node_map.h:39
RangeReverse< Range > reverse(Range &x)
Iterate over a range in reverse.
Definition: utilities.h:131
std::string join(Container cont, const std::string &delim)
join elements of an container into a string separated by a delimiter.
Definition: string.h:150
int delete_tables(PFS_engine_table_share_proxy **, unsigned int) noexcept
Definition: pfs_plugin_table_v1_all_empty.cc:38
olap_type
Definition: olap.h:30
OverflowBitset is a fixed-size (once allocated) bitmap that is optimized for the common case of few e...
required string key
Definition: replication_asynchronous_connection_failover.proto:59
case opt name
Definition: sslopt-case.h:32
Access paths are a query planning structure that correspond 1:1 to iterators, in that an access path ...
Definition: access_path.h:212
struct AccessPath::@60::@67 pushed_join_ref
auto & weedout()
Definition: access_path.h:824
struct AccessPath::@60::@75 rowid_union
auto & filter()
Definition: access_path.h:744
bool count_all_rows
Definition: access_path.h:1204
AccessPath * bka_path
Definition: access_path.h:974
auto & ref_or_null()
Definition: access_path.h:552
auto & materialized_table_function()
Definition: access_path.h:664
AccessPath * cpk_child
Definition: access_path.h:1035
auto & rowid_union()
Definition: access_path.h:632
hypergraph::NodeMap parameter_tables
If nonzero, a bitmap of other tables whose joined-in rows must already be loaded when rows from this ...
Definition: access_path.h:508
const auto & bka_join() const
Definition: access_path.h:724
TABLE * temp_table
Definition: access_path.h:1241
OverflowBitset equijoin_predicates
Definition: access_path.h:1145
Item ** group_items
Definition: access_path.h:1253
struct AccessPath::@60::@79 materialized_table_function
double rescan_cost() const
Return the cost of scanning the given path for the second time (or later) in the given query block.
Definition: access_path.h:419
auto & materialize()
Definition: access_path.h:792
const auto & temptable_aggregate() const
Definition: access_path.h:772
OverflowBitset & subsumed_sargable_join_predicates()
Similar to applied_sargable_join_predicates, bitmap of sargable join predicates that have been applie...
Definition: access_path.h:480
auto & index_scan()
Definition: access_path.h:528
const auto & delete_rows() const
Definition: access_path.h:868
const auto & filter() const
Definition: access_path.h:748
const auto & index_distance_scan() const
Definition: access_path.h:540
auto & delete_rows()
Definition: access_path.h:864
struct AccessPath::@60::@74 rowid_intersection
bool reuse_handler
Definition: access_path.h:1012
struct AccessPath::@60::@90 sort
KEY_PART * used_key_part
Definition: access_path.h:986
double m_init_cost
Expected cost to initialize this access path; ie., cost to read k out of N rows would be init_cost + ...
Definition: access_path.h:899
olap_type olap
Definition: access_path.h:1191
OverflowBitset & applied_sargable_join_predicates()
Bitmap of sargable join predicates that have already been applied in this access path by means of an ...
Definition: access_path.h:460
Item * condition
Definition: access_path.h:1163
const auto & index_merge() const
Definition: access_path.h:620
const auto & update_rows() const
Definition: access_path.h:876
enum AccessPath::Type type
auto & alternative()
Definition: access_path.h:848
auto & pushed_join_ref()
Definition: access_path.h:568
struct AccessPath::@60::@95 materialize
void set_cost(double val)
Definition: access_path.h:393
AccessPath * outer
Definition: access_path.h:1119
struct AccessPath::@60::@104 delete_rows
auto & index_skip_scan()
Definition: access_path.h:640
bool rewrite_semi_to_inner
Definition: access_path.h:1123
const auto & zero_rows_aggregated() const
Definition: access_path.h:708
auto & group_index_skip_scan()
Definition: access_path.h:648
const auto & eq_ref() const
Definition: access_path.h:564
int group_items_size
Definition: access_path.h:1254
bool use_order
Definition: access_path.h:930
bool pfs_batch_mode
Definition: access_path.h:1137
auto & temptable_aggregate()
Definition: access_path.h:768
AccessPath()
Definition: access_path.h:218
GroupIndexSkipScanParameters * param
Definition: access_path.h:1079
auto & rowid_intersection()
Definition: access_path.h:624
struct AccessPath::@60::@89 filter
double init_cost() const
Definition: access_path.h:387
bool materialize_subqueries
Definition: access_path.h:1173
AccessPath * child
Definition: access_path.h:1107
bool allow_spill_to_disk
Definition: access_path.h:1121
struct AccessPath::@60::@85 hash_join
struct AccessPath::@60::@61 table_scan
Index_lookup * used_ref
Definition: access_path.h:1267
auto & mrr()
Definition: access_path.h:592
const auto & index_scan() const
Definition: access_path.h:532
const auto & fake_single_row() const
Definition: access_path.h:692
struct AccessPath::@60::@99 weedout
bool reject_multiple_rows
Definition: access_path.h:1205
bool allow_clustered_primary_key_scan
Definition: access_path.h:1027
const char * name
Definition: access_path.h:1271
bool use_limit
Definition: access_path.h:964
Window * window
Definition: access_path.h:1240
table_map tables_to_get_rowid_for
Definition: access_path.h:1124
auto & materialize_information_schema_table()
Definition: access_path.h:800
auto & bka_join()
Definition: access_path.h:720
struct AccessPath::@60::@83 zero_rows
const auto & follow_tail() const
Definition: access_path.h:604
SJ_TMP_TABLE * weedout_table
Definition: access_path.h:1248
bool has_group_skip_scan
Whether this access path contains a GROUP_INDEX_SKIP_SCAN.
Definition: access_path.h:322
Index_lookup * ref
Definition: access_path.h:941
const auto & window() const
Definition: access_path.h:820
void set_init_once_cost(double val)
Definition: access_path.h:403
const auto & materialized_table_function() const
Definition: access_path.h:668
auto & unqualified_count()
Definition: access_path.h:672
bool keep_current_rowid
Definition: access_path.h:976
bool using_extended_key_parts
Definition: access_path.h:1022
struct AccessPath::@60::@66 eq_ref
auto & nested_loop_join()
Definition: access_path.h:728
const auto & full_text_search() const
Definition: access_path.h:580
struct AccessPath::@60::@92 temptable_aggregate
struct AccessPath::@60::@97 append
struct AccessPath::@60::@101 remove_duplicates_on_index
bool can_be_used_for_ror
Definition: access_path.h:1002
float rec_per_key
Definition: access_path.h:1130
Safety safe_for_rowid
Whether it is safe to get row IDs (for sorting) from this access path.
Definition: access_path.h:313
JOIN * join
Definition: access_path.h:1212
unsigned index
Definition: access_path.h:998
unsigned mrr_buf_size
Definition: access_path.h:994
auto & remove_duplicates()
Definition: access_path.h:832
struct AccessPath::@60::@78 dynamic_index_range_scan
struct AccessPath::@60::@93 limit_offset
RowIterator * iterator
If an iterator has been instantiated for this access path, points to the iterator.
Definition: access_path.h:383
const auto & stream() const
Definition: access_path.h:788
const auto & remove_duplicates() const
Definition: access_path.h:836
auto & window()
Definition: access_path.h:816
bool need_rows_in_rowid_order
Definition: access_path.h:1006
Table_ref * table_list
Definition: access_path.h:1232
const auto & unqualified_count() const
Definition: access_path.h:676
void * secondary_engine_data
Auxiliary data used by a secondary storage engine while processing the access path during optimizatio...
Definition: access_path.h:517
struct AccessPath::@60::@84 zero_rows_aggregated
void set_init_cost(double val)
Definition: access_path.h:398
void set_num_output_rows(double val)
Definition: access_path.h:883
bool is_covering
Definition: access_path.h:1055
const auto & mrr() const
Definition: access_path.h:596
void set_cost_before_filter(double val)
Definition: access_path.h:408
struct AccessPath::@60::@102 alternative
bool remove_duplicates
Definition: access_path.h:1185
table_map tables_to_update
Definition: access_path.h:1280
const auto & table_value_constructor() const
Definition: access_path.h:684
KEY * key
Definition: access_path.h:1157
struct AccessPath::@60::@65 ref_or_null
auto & table_value_constructor()
Definition: access_path.h:680
auto & stream()
Definition: access_path.h:784
size_t key_len
Definition: access_path.h:1158
const OverflowBitset & subsumed_sargable_join_predicates() const
Definition: access_path.h:483
const auto & zero_rows() const
Definition: access_path.h:700
const auto & append() const
Definition: access_path.h:812
ha_rows offset
Definition: access_path.h:1203
int idx
Definition: access_path.h:929
auto & hash_join()
Definition: access_path.h:712
auto & cache_invalidator()
Definition: access_path.h:856
bool force_sort_rowids
Definition: access_path.h:1187
unsigned num_used_key_parts
Definition: access_path.h:999
unsigned num_ranges
Definition: access_path.h:991
ORDER * order
Definition: access_path.h:1183
auto & sort()
Definition: access_path.h:752
auto & fake_single_row()
Definition: access_path.h:688
struct AccessPath::@60::@81 table_value_constructor
ha_rows limit
Definition: access_path.h:1184
struct AccessPath::@60::@73 index_merge
struct AccessPath::@60::@76 index_skip_scan
auto & follow_tail()
Definition: access_path.h:600
double m_num_output_rows
Expected number of output rows.
Definition: access_path.h:887
const JoinPredicate * join_predicate
Definition: access_path.h:1120
struct AccessPath::@60::@72 index_range_scan
const auto & const_table() const
Definition: access_path.h:588
struct AccessPath::@60::@69 const_table
const auto & dynamic_index_range_scan() const
Definition: access_path.h:660
const auto & table_scan() const
Definition: access_path.h:524
unsigned mrr_flags
Definition: access_path.h:993
int ref_slice
Definition: access_path.h:1198
OverflowBitset filter_predicates
Bitmap of WHERE predicates that we are including on this access path, referring to the “predicates” a...
Definition: access_path.h:444
bool can_be_used_for_imerge
Definition: access_path.h:1009
bool forced_by_hint
Definition: access_path.h:1026
struct AccessPath::@60::@82 fake_single_row
auto & limit_offset()
Definition: access_path.h:776
AccessPath * inner
Definition: access_path.h:1119
QUICK_RANGE * range
Definition: access_path.h:936
bool provide_rowid
Definition: access_path.h:1215
const auto & index_range_scan() const
Definition: access_path.h:612
const auto & ref() const
Definition: access_path.h:548
bool forced_by_dbug
Whether this access path is forced preferred over all others by means of a SET DEBUG force_subplan_0x...
Definition: access_path.h:327
Item_func_match * ft_func
Definition: access_path.h:965
struct AccessPath::@60::@96 materialize_information_schema_table
ha_rows * send_records_override
Definition: access_path.h:1208
struct AccessPath::@60::@94 stream
auto & table_scan()
Definition: access_path.h:520
const auto & hash_join() const
Definition: access_path.h:716
QEP_TAB * qep_tab
Definition: access_path.h:1083
Table_function * table_function
Definition: access_path.h:1087
auto & zero_rows_aggregated()
Definition: access_path.h:704
const auto & sort() const
Definition: access_path.h:756
union AccessPath::@60 u
bool unwrap_rollup
Definition: access_path.h:1186
Type
Definition: access_path.h:228
@ FOLLOW_TAIL
Definition: access_path.h:242
@ FILTER
Definition: access_path.h:266
@ PUSHED_JOIN_REF
Definition: access_path.h:238
@ ZERO_ROWS_AGGREGATED
Definition: access_path.h:255
@ UPDATE_ROWS
Definition: access_path.h:284
@ AGGREGATE
Definition: access_path.h:268
@ BKA_JOIN
Definition: access_path.h:262
@ ZERO_ROWS
Definition: access_path.h:254
@ CONST_TABLE
Definition: access_path.h:240
@ GROUP_INDEX_SKIP_SCAN
Definition: access_path.h:248
@ INDEX_RANGE_SCAN
Definition: access_path.h:243
@ UNQUALIFIED_COUNT
Definition: access_path.h:257
@ EQ_REF
Definition: access_path.h:237
@ FAKE_SINGLE_ROW
Definition: access_path.h:253
@ MATERIALIZE_INFORMATION_SCHEMA_TABLE
Definition: access_path.h:273
@ WINDOW
Definition: access_path.h:275
@ REF_OR_NULL
Definition: access_path.h:236
@ MATERIALIZE
Definition: access_path.h:272
@ NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL
Definition: access_path.h:261
@ ROWID_UNION
Definition: access_path.h:246
@ INDEX_SKIP_SCAN
Definition: access_path.h:247
@ MRR
Definition: access_path.h:241
@ CACHE_INVALIDATOR
Definition: access_path.h:280
@ INDEX_SCAN
Definition: access_path.h:233
@ TABLE_VALUE_CONSTRUCTOR
Definition: access_path.h:252
@ WEEDOUT
Definition: access_path.h:276
@ MATERIALIZED_TABLE_FUNCTION
Definition: access_path.h:256
@ REMOVE_DUPLICATES_ON_INDEX
Definition: access_path.h:278
@ TABLE_SCAN
Definition: access_path.h:232
@ REF
Definition: access_path.h:235
@ TEMPTABLE_AGGREGATE
Definition: access_path.h:269
@ LIMIT_OFFSET
Definition: access_path.h:270
@ APPEND
Definition: access_path.h:274
@ NESTED_LOOP_JOIN
Definition: access_path.h:260
@ INDEX_MERGE
Definition: access_path.h:244
@ FULL_TEXT_SEARCH
Definition: access_path.h:239
@ ALTERNATIVE
Definition: access_path.h:279
@ STREAM
Definition: access_path.h:271
@ REMOVE_DUPLICATES
Definition: access_path.h:277
@ ROWID_INTERSECTION
Definition: access_path.h:245
@ DYNAMIC_INDEX_RANGE_SCAN
Definition: access_path.h:249
@ DELETE_ROWS
Definition: access_path.h:283
@ SORT
Definition: access_path.h:267
@ INDEX_DISTANCE_SCAN
Definition: access_path.h:234
@ HASH_JOIN
Definition: access_path.h:263
bool retrieve_full_rows
Definition: access_path.h:1038
struct AccessPath::@60::@70 mrr
double m_init_once_cost
Of init_cost, how much of the initialization needs only to be done once per query block.
Definition: access_path.h:911
const auto & rowid_union() const
Definition: access_path.h:636
const TABLE * table
Definition: access_path.h:1156
const auto & index_skip_scan() const
Definition: access_path.h:644
auto & dynamic_index_range_scan()
Definition: access_path.h:656
const auto & pushed_join_ref() const
Definition: access_path.h:572
auto & remove_duplicates_on_index()
Definition: access_path.h:840
table_map immediate_tables
Definition: access_path.h:1276
double m_cost_before_filter
If no filter, identical to cost.
Definition: access_path.h:915
double cost() const
Definition: access_path.h:385
int mrr_flags
Definition: access_path.h:975
struct AccessPath::@60::@62 index_scan
auto & full_text_search()
Definition: access_path.h:576
const auto & materialize() const
Definition: access_path.h:796
int ordering_state
Which ordering the rows produced by this path follow, if any (see interesting_orders....
Definition: access_path.h:377
auto & eq_ref()
Definition: access_path.h:560
struct AccessPath::@60::@63 index_distance_scan
JoinType join_type
Definition: access_path.h:1128
struct AccessPath::@60::@77 group_index_skip_scan
const auto & limit_offset() const
Definition: access_path.h:780
MaterializePathParameters * param
Definition: access_path.h:1226
const char * cause
Definition: access_path.h:1110
auto & update_rows()
Definition: access_path.h:872
auto & index_merge()
Definition: access_path.h:616
auto & aggregate()
Definition: access_path.h:760
double m_cost
Expected cost to read all of this access path once.
Definition: access_path.h:890
struct AccessPath::@60::@91 aggregate
const auto & group_index_skip_scan() const
Definition: access_path.h:652
auto & zero_rows()
Definition: access_path.h:696
AccessPath * subquery_path
Definition: access_path.h:1194
Mem_root_array< AppendPathParameters > * children
Definition: access_path.h:1236
struct AccessPath::@60::@86 bka_join
const auto & nested_loop_join() const
Definition: access_path.h:732
bool already_expanded_predicates
Definition: access_path.h:1138
const auto & remove_duplicates_on_index() const
Definition: access_path.h:844
Temp_table_param * temp_table_param
Definition: access_path.h:1195
auto & nested_loop_semijoin_with_duplicate_removal()
Definition: access_path.h:736
bool reverse
Definition: access_path.h:931
AccessPath * table_scan_path
Definition: access_path.h:1263
struct AccessPath::@60::@88 nested_loop_semijoin_with_duplicate_removal
const auto & materialize_information_schema_table() const
Definition: access_path.h:804
double subquery_cost
The total cost of executing the queries that we materialize.
Definition: access_path.h:1228
bool geometry
Definition: access_path.h:1015
bool count_examined_rows
Whether this access path counts as one that scans a base table, and thus should be counted towards ex...
Definition: access_path.h:319
unsigned loosescan_key_len
Definition: access_path.h:1260
auto & index_distance_scan()
Definition: access_path.h:536
auto & ref()
Definition: access_path.h:544
table_map tables_to_delete_from
Definition: access_path.h:1275
struct AccessPath::@60::@87 nested_loop_join
double init_once_cost() const
Definition: access_path.h:389
IndexSkipScanParameters * param
Definition: access_path.h:1070
const OverflowBitset & applied_sargable_join_predicates() const
Definition: access_path.h:463
Mem_root_array< AccessPath * > * children
Definition: access_path.h:1028
struct AccessPath::@60::@80 unqualified_count
OverflowBitset delayed_predicates
Bitmap of WHERE predicates that touch tables we have joined in, but that we could not apply yet (for ...
Definition: access_path.h:472
AccessPath * table_path
Definition: access_path.h:1088
const auto & aggregate() const
Definition: access_path.h:764
struct AccessPath::@60::@103 cache_invalidator
TABLE * table
Definition: access_path.h:925
auto & append()
Definition: access_path.h:808
double cost_before_filter() const
Definition: access_path.h:391
const auto & weedout() const
Definition: access_path.h:828
const auto & cache_invalidator() const
Definition: access_path.h:860
bool needs_buffering
Definition: access_path.h:1244
int8_t immediate_update_delete_table
For UPDATE and DELETE statements: The node index of a table which can be updated or deleted from imme...
Definition: access_path.h:371
QUICK_RANGE ** ranges
Definition: access_path.h:990
struct AccessPath::@60::@68 full_text_search
bool is_unique
Definition: access_path.h:958
const auto & alternative() const
Definition: access_path.h:852
Safety
A general enum to describe the safety of a given operation.
Definition: access_path.h:294
@ SAFE_IF_SCANNED_ONCE
The given operation is safe if this access path is scanned once, but not if it's scanned multiple tim...
Definition: access_path.h:303
@ UNSAFE
The given operation is unsafe on this access path, no matter how many or few times it's scanned.
Definition: access_path.h:309
@ SAFE
The given operation is always safe on this access path.
Definition: access_path.h:296
auto & const_table()
Definition: access_path.h:584
struct AccessPath::@60::@105 update_rows
unsigned mrr_length_per_rec
Definition: access_path.h:1129
const auto & rowid_intersection() const
Definition: access_path.h:628
Mem_root_array< Item_values_column * > * output_refs
Definition: access_path.h:1094
auto & index_range_scan()
Definition: access_path.h:608
double num_output_rows() const
Definition: access_path.h:881
double num_output_rows_before_filter
If no filter, identical to num_output_rows.
Definition: access_path.h:422
const auto & ref_or_null() const
Definition: access_path.h:556
struct AccessPath::@60::@71 follow_tail
Filesort * filesort
Definition: access_path.h:1177
bool store_rowids
Definition: access_path.h:1122
const auto & nested_loop_semijoin_with_duplicate_removal() const
Definition: access_path.h:740
Definition: access_path.h:177
JOIN * join
Definition: access_path.h:179
AccessPath * path
Definition: access_path.h:178
Definition: group_index_skip_scan_plan.h:44
Logically a part of AccessPath::index_skip_scan(), but is too large, so split out into its own struct...
Definition: index_skip_scan_plan.h:72
Structure used for index-based lookups.
Definition: sql_opt_exec_shared.h:66
A specification that two specific relational expressions (e.g., two tables, or a table and a join bet...
Definition: access_path.h:77
int semijoin_group_size
Definition: access_path.h:123
FunctionalDependencySet functional_dependencies
Definition: access_path.h:94
Mem_root_array< int > functional_dependencies_idx
Definition: access_path.h:99
RelationalExpression * expr
Definition: access_path.h:78
double selectivity
Definition: access_path.h:79
int ordering_idx_needed_for_semijoin_rewrite
Definition: access_path.h:117
Item ** semijoin_group
Definition: access_path.h:122
size_t estimated_bytes_per_row
Definition: access_path.h:83
Definition: range_optimizer.h:54
The MEM_ROOT is a simple arena, where allocations are carved out of larger blocks.
Definition: my_alloc.h:82
Definition: materialize_path_parameters.h:41
AccessPath * subquery_path
Definition: materialize_path_parameters.h:42
int select_number
Definition: materialize_path_parameters.h:43
JOIN * join
Definition: materialize_path_parameters.h:44
bool copy_items
Definition: materialize_path_parameters.h:46
Temp_table_param * temp_table_param
Definition: materialize_path_parameters.h:47
bool disable_deduplication_by_hash_field
Definition: materialize_path_parameters.h:45
Definition: materialize_path_parameters.h:39
bool rematerialize
True if rematerializing on every Init() call (e.g., because we have a dependency on a value from outs...
Definition: materialize_path_parameters.h:79
Mem_root_array< Operand > m_operands
Definition: materialize_path_parameters.h:57
Common_table_expr * cte
If materializing a CTE, points to it (see m_cte), otherwise nullptr.
Definition: materialize_path_parameters.h:64
bool reject_multiple_rows
True if this is the top level iterator for a materialized derived table transformed from a scalar sub...
Definition: materialize_path_parameters.h:98
ha_rows limit_rows
Used for when pushing LIMIT down to MaterializeIterator; this is more efficient than having a LimitOf...
Definition: materialize_path_parameters.h:91
TABLE * table
Handle to table to materialize into.
Definition: materialize_path_parameters.h:61
int ref_slice
Definition: materialize_path_parameters.h:73
Query_expression * unit
The query expression we are materializing.
Definition: materialize_path_parameters.h:67
Mem_root_array< const AccessPath * > * invalidators
Definition: materialize_path_parameters.h:58
Definition: table.h:283
A position of table within a join order.
Definition: sql_select.h:354
A filter of some sort that is not a join condition (those are stored in JoinPredicate objects).
Definition: access_path.h:131
hypergraph::NodeMap total_eligibility_set
Definition: access_path.h:144
bool was_join_condition
Definition: access_path.h:157
Mem_root_array< int > functional_dependencies_idx
Definition: access_path.h:169
FunctionalDependencySet functional_dependencies
Definition: access_path.h:168
int source_multiple_equality_idx
Definition: access_path.h:165
hypergraph::NodeMap used_nodes
Definition: access_path.h:135
Item * condition
Definition: access_path.h:132
double selectivity
Definition: access_path.h:146
Mem_root_array< ContainedSubquery > contained_subqueries
Definition: access_path.h:174
Represents an expression tree in the relational algebra of joins.
Definition: relational_expression.h:144
Definition: table.h:1403