MySQL 8.0.40
Source Code Documentation
access_path.h
1/* Copyright (c) 2020, 2024, Oracle and/or its affiliates.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License, version 2.0,
5 as published by the Free Software Foundation.
6
7 This program is designed to work with certain software (including
8 but not limited to OpenSSL) that is licensed under separate terms,
9 as designated in a particular file or component or in included license
10 documentation. The authors of MySQL hereby grant you an additional
11 permission to link the program and your derivative works with the
12 separately licensed software that they have either included with
13 the program or referenced in the documentation.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License, version 2.0, for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
23
24#ifndef SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
25#define SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
26
27#include <assert.h>
28#include <stdint.h>
29
30#include <string>
31#include <type_traits>
32#include <vector>
33
40#include "sql/join_type.h"
41#include "sql/mem_root_array.h"
42#include "sql/sql_array.h"
43#include "sql/sql_class.h"
44#include "sql/table.h"
45
46template <class T>
47class Bounds_checked_array;
48class Common_table_expr;
49class Filesort;
50class Item;
51class Item_func_match;
52class JOIN;
53class KEY;
54class QEP_TAB;
55class QUICK_RANGE;
56class SJ_TMP_TABLE;
57class Table_function;
58class Temp_table_param;
59class Window;
60struct AccessPath;
63struct Index_lookup;
64struct KEY_PART;
65struct ORDER;
66struct POSITION;
67struct RelationalExpression;
68struct TABLE;
69
70/**
71 A specification that two specific relational expressions
72 (e.g., two tables, or a table and a join between two other tables)
73 should be joined together. The actual join conditions, if any,
74 live inside the “expr” object, as does the join type etc.
75 */
76struct JoinPredicate {
77 RelationalExpression *expr;
78 double selectivity;
79
80 // If this join is made using a hash join, estimates the width
81 // of each row as stored in the hash table, in bytes.
82 size_t estimated_bytes_per_row;
83
84 // The set of (additional) functional dependencies that are active
85 // after this join predicate has been applied. E.g. if we're joining
86 // on t1.x = t2.x, there will be a bit for that functional dependency.
87 // We don't currently support more complex join conditions, but there's
88 // no conceptual reason why we couldn't, e.g. a join on a = b + c
89 // could give rise to the FD {b, c} → a and possibly even {a, b} → c
90 // or {a, c} → b.
91 //
92 // Used in the processing of interesting orders.
93 FunctionalDependencySet functional_dependencies;
94
95 // A less compact form of functional_dependencies, used during building
96 // (FunctionalDependencySet bitmaps are only available after all functional
97 dependencies have been collected and Build() has been called).
98 Mem_root_array<int> functional_dependencies_idx;
99
100 // If this is a suitable semijoin: Contains the grouping given by the
101 // join key. If the rows are in this grouping, then the join optimizer will
102 // consider deduplicating on it and inverting the join. -1 otherwise.
103 int ordering_idx_needed_for_semijoin_rewrite = -1;
104
105 // Same as ordering_idx_needed_for_semijoin_rewrite, but given to the
106 // RemoveDuplicatesIterator for doing the actual grouping. Allocated
107 // on the MEM_ROOT. Can be empty, in which case a LIMIT 1 would do.
108 Item **semijoin_group = nullptr;
109 int semijoin_group_size = 0;
110};
111
112/**
113 A filter of some sort that is not a join condition (those are stored
114 in JoinPredicate objects). AND conditions are typically split up into
115 multiple Predicates.
116 */
117struct Predicate {
118 Item *condition;
119
120 // condition->used_tables(), converted to a NodeMap.
121 hypergraph::NodeMap used_nodes;
122
123 // tables referred to by the condition, plus any tables whose values
124 // can null any of those tables. (Even when reordering outer joins,
125 // at least one of those tables will still be present on the
126 // left-hand side of the outer join, so this is sufficient.)
127 //
128 // As a special case, we allow setting RAND_TABLE_BIT, even though it
129 // is normally part of a table_map, not a NodeMap.
130 hypergraph::NodeMap total_eligibility_set;
131
132 double selectivity;
133
134 // Whether this predicate is a join condition after all; it was promoted
135 // to a WHERE predicate since it was part of a cycle (see the comment in
136 // AddCycleEdges()). If it is, it is usually ignored so that we don't
137 // double-apply join conditions -- but if the join in question was not
138 // applied (because the cycle was broken at this point), the predicate
139 // would come into play. This is normally registered on the join itself
140 // (see RelationalExpression::join_predicate_bitmap), but having the bit
141 // on the predicate itself is used to avoid trying to push it down as a
142 // sargable predicate.
143 bool was_join_condition = false;
144
145 // If this is a join condition that came from a multiple equality,
146 // and we have decided to create a mesh from that multiple equality,
147 // returns the index of it into the “multiple_equalities” array
148 // in MakeJoinHypergraph(). (You don't actually need the array to
149 // use this; it's just an opaque index to deduplicate between different
150 // predicates.) Otherwise, -1.
151 int source_multiple_equality_idx = -1;
152
153 // See the equivalent fields in JoinPredicate.
154 FunctionalDependencySet functional_dependencies;
155 Mem_root_array<int> functional_dependencies_idx;
156
157 // The list of all subqueries referred to in this predicate, if any.
158 // The optimizer uses this to add their materialized/non-materialized
159 // costs when evaluating filters.
160 Mem_root_array<ContainedSubquery> contained_subqueries;
161};
162
163struct AppendPathParameters {
164 AccessPath *path;
165 JOIN *join;
166};
167
168/// To indicate that a row estimate is not yet made.
169constexpr double kUnknownRowCount = -1.0;
170
171/**
172 Access paths are a query planning structure that correspond 1:1 to iterators,
173 in that an access path contains pretty much exactly the information
174 needed to instantiate a given iterator, plus some information that is only
175 needed during planning, such as costs. (The new join optimizer will extend
176 this somewhat in the future. Some iterators also need the query block,
177 ie., JOIN object, they are part of, but that is implicitly available when
178 constructing the tree.)
179
180 AccessPath objects build on a variant, ie., they can hold an access path of
181 any type (table scan, filter, hash join, sort, etc.), although only one at the
182 same time. Currently, they contain 32 bytes of base information that is common
183 to any access path (type identifier, costs, etc.), and then up to 40 bytes
184 that is type-specific (e.g. for a table scan, the TABLE object). It would be
185 nice if we could squeeze it down to 64 and fit a cache line exactly, but it
186 does not seem to be easy without fairly large contortions.
187
188 We could have solved this by inheritance, but the fixed-size design makes it
189 possible to replace an access path when a better one is found, without
190 introducing a new allocation, which will be important when using them as a
191 planning structure.
192 */
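// A minimal sketch (with a hypothetical helper name) of why the fixed-size,
// trivially-destructible design matters during planning: a cheaper candidate
// can overwrite a worse plan in place, with no new MEM_ROOT allocation and no
// dangling references to the slot:
//
//   void KeepCheapest(AccessPath *slot, const AccessPath &candidate) {
//     if (slot->cost < 0.0 || candidate.cost < slot->cost) {
//       *slot = candidate;  // Plain struct assignment.
//     }
//   }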
193struct AccessPath {
194 // Most of the members are declared with initializers, but bit fields cannot
195 // be declared with initializers until C++20, so we need to define a
196 // constructor to initialize those fields.
197 // TODO(khatlen): When we move to C++20, add initializers to the bit fields
198 // too, and use the default constructor generated by the compiler.
199 AccessPath()
200 : count_examined_rows(false)
201#ifndef NDEBUG
202 ,
203 forced_by_dbug(false)
204#endif
205 {
206 }
207
208 enum Type : uint8_t {
209 // Basic access paths (those with no children, at least nominally).
210 // NOTE: When adding more paths to this section, also update GetBasicTable()
211 // to handle them.
212 TABLE_SCAN,
213 INDEX_SCAN,
214 REF,
215 REF_OR_NULL,
216 EQ_REF,
217 PUSHED_JOIN_REF,
218 FULL_TEXT_SEARCH,
219 CONST_TABLE,
220 MRR,
221 FOLLOW_TAIL,
222 INDEX_RANGE_SCAN,
223 INDEX_MERGE,
224 ROWID_INTERSECTION,
225 ROWID_UNION,
226 INDEX_SKIP_SCAN,
227 GROUP_INDEX_SKIP_SCAN,
228 DYNAMIC_INDEX_RANGE_SCAN,
229
230 // Basic access paths that don't correspond to a specific table.
231 TABLE_VALUE_CONSTRUCTOR,
232 FAKE_SINGLE_ROW,
233 ZERO_ROWS,
234 ZERO_ROWS_AGGREGATED,
235 MATERIALIZED_TABLE_FUNCTION,
236 UNQUALIFIED_COUNT,
237
238 // Joins.
239 NESTED_LOOP_JOIN,
240 NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL,
241 BKA_JOIN,
242 HASH_JOIN,
243
244 // Composite access paths.
245 FILTER,
246 SORT,
247 AGGREGATE,
248 TEMPTABLE_AGGREGATE,
249 LIMIT_OFFSET,
250 STREAM,
251 MATERIALIZE,
252 MATERIALIZE_INFORMATION_SCHEMA_TABLE,
253 APPEND,
254 WINDOW,
255 WEEDOUT,
256 REMOVE_DUPLICATES,
257 REMOVE_DUPLICATES_ON_INDEX,
258 ALTERNATIVE,
259 CACHE_INVALIDATOR,
260
261 // Access paths that modify tables.
262 DELETE_ROWS,
263 UPDATE_ROWS,
264 } type;
265
266 /// A general enum to describe the safety of a given operation.
267 /// Currently we only use this to describe row IDs, but it can easily
268 /// be reused for safety of updating a table we're reading from
269 /// (the Halloween problem), or just generally unreproducible results
270 /// (e.g. a TABLESAMPLE changing due to external factors).
271 ///
272 /// Less safe values have higher numerical values.
273 enum Safety : uint8_t {
274 /// The given operation is always safe on this access path.
275 SAFE = 0,
276
277 /// The given operation is safe if this access path is scanned once,
278 /// but not if it's scanned multiple times (e.g. used on the inner side
279 /// of a nested-loop join). A typical example of this is a derived table
280 /// or CTE that is rematerialized on each scan, so that references to
281 /// the old values (such as row IDs) are no longer valid.
282 SAFE_IF_SCANNED_ONCE = 1,
283
284 /// The given operation is unsafe on this access path, no matter how many
285 /// or few times it's scanned. Often, it may help to materialize it
286 /// (assuming the materialization itself doesn't use the operation
287 /// in question).
288 UNSAFE = 2
289 };
290
291 /// Whether it is safe to get row IDs (for sorting) from this access path.
292 Safety safe_for_rowid = SAFE;
293
294 /// Whether this access path counts as one that scans a base table,
295 /// and thus should be counted towards examined_rows. It can sometimes
296 /// seem a bit arbitrary which iterators count towards examined_rows
297 /// and which ones do not, so the only canonical reference is the tests.
298 bool count_examined_rows : 1;
299
300#ifndef NDEBUG
301 /// Whether this access path is forced preferred over all others by means
302 /// of a SET DEBUG force_subplan_0x... statement.
303 bool forced_by_dbug : 1;
304#endif
305
306 /// For UPDATE and DELETE statements: The node index of a table which can be
307 /// updated or deleted from immediately as the rows are read from the
308 /// iterator, if this path is only read from once. -1 if there is no such
309 /// table in this path.
310 ///
311 /// Note that this is an index into CostingReceiver's array of nodes, and is
312 /// not necessarily equal to the table number within the query block given by
313 /// Table_ref::tableno().
314 ///
315 /// The table, if any, is currently always the outermost table in the path.
316 ///
317 /// It is possible to have plans where it would be safe to operate
318 /// "immediately" on more than one table. For example, if we do a merge join,
319 /// it is safe to perform immediate deletes on tables on the inner side of the
320 /// join, since both sides are read only once. (However, we currently do not
321 /// support merge joins.)
322 ///
323 /// Another possibility is when the outer table of a nested loop join is
324 /// guaranteed to return at most one row (typically, a unique index lookup
325 /// aka. eq_ref). Then it's safe to delete immediately from both sides of the
326 /// nested loop join. But we don't do this yet.
327 ///
328 /// Hash joins read both sides exactly once. However, with hash joins, the
329 /// scans on the inner tables are not positioned on the correct row when the
330 /// result of the join is returned, so the immediate delete logic will need to
331 /// be changed to reposition the underlying scans before doing the immediate
332 /// deletes. While this can be done, it makes the benefit of immediate deletes
333 /// less obvious for these tables, and it can also be a loss in some cases,
334 /// because we lose the deduplication provided by the Unique object used for
335 /// buffered deletes (the immediate deletes could end up spending time
336 /// repositioning to already deleted rows). So we currently don't attempt to
337 /// do immediate deletes from inner tables of hash joins either.
338 ///
339 /// The outer table of a hash join can be deleted from immediately if the
340 /// inner table fits in memory. If the hash join spills to disk, though,
341 /// neither the rows of the outer table nor the rows of the inner table come
342 /// out in the order of the underlying scan, so it is not safe in general to
343 /// perform immediate deletes on the outer table of a hash join.
344 ///
345 /// If support for immediate operations on multiple tables is added,
346 /// this member could be changed from a node index to a NodeMap.
347 int8_t immediate_update_delete_table{-1};
348
349 /// Which ordering the rows produced by this path follow, if any
350 /// (see interesting_orders.h). This is really a LogicalOrderings::StateIndex,
351 /// but we don't want to add a dependency on interesting_orders.h from
352 /// this file, so we use the base type instead of the typedef here.
353 int ordering_state = 0;
354
355 /// If an iterator has been instantiated for this access path, points to the
356 /// iterator. Used for constructing iterators that need to talk to each other
357 /// (e.g. for recursive CTEs, or BKA join), and also for locating timing
358 /// information in EXPLAIN ANALYZE queries.
359 RowIterator *iterator = nullptr;
360
361 /// Expected cost to read all of this access path once; -1.0 for unknown.
362 double cost{-1.0};
363
364 /// Expected cost to initialize this access path; ie., cost to read
365 /// k out of N rows would be init_cost + (k/N) * (cost - init_cost).
366 /// Note that EXPLAIN prints out cost of reading the _first_ row
367 /// because it is easier for the user and also easier to measure in
368 /// EXPLAIN ANALYZE, but it is easier to do calculations with a pure
369 /// initialization cost, so that is what we use in this member.
370 /// -1.0 for unknown.
371 double init_cost{-1.0};
372
373 /// Of init_cost, how much of the initialization needs only to be done
374 /// once per query block. (This is a cost, not a proportion.)
375 /// Ie., if the access path can reuse some of its initialization work
376 /// if Init() is called multiple times, this member will be nonzero.
377 /// A typical example is a materialized table with rematerialize=false;
378 /// the second time Init() is called, it's a no-op. Most paths will have
379 /// init_once_cost = 0.0, ie., repeated scans will cost the same.
380 /// We do not intend to use this field to model cache effects.
381 ///
382 /// This is currently not printed in EXPLAIN, only optimizer trace.
383 double init_once_cost{0.0};
384
385 /// Return the cost of scanning the given path for the second time
386 /// (or later) in the given query block. This is really the interesting
387 /// metric, not init_once_cost in itself, but since nearly all paths
388 /// have zero init_once_cost, storing that instead allows us to skip
389 /// a lot of repeated path->init_once_cost = path->init_cost calls
390 /// in the code.
391 double rescan_cost() const { return cost - init_once_cost; }
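/// Worked example with illustrative numbers: a materialized table with
/// rematerialize=false could have cost = 110.0, init_cost = 100.0 and
/// init_once_cost = 100.0. The first scan pays the full 110.0, but
/// rescan_cost() = cost - init_once_cost = 10.0, since the second Init()
/// call is a no-op and only the reading itself is paid again.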
392
393 /// If no filter, identical to num_output_rows, cost, respectively.
394 /// init_cost is always the same (filters have zero initialization cost).
395 double num_output_rows_before_filter{kUnknownRowCount},
396 cost_before_filter{-1.0};
397
398 /// Bitmap of WHERE predicates that we are including on this access path,
399 /// referring to the “predicates” array internal to the join optimizer.
400 /// Since bit masks are much cheaper to deal with than creating Item
401 /// objects, and we don't invent new conditions during join optimization
402 /// (all of them are known when we begin optimization), we stick to
403 /// manipulating bit masks during optimization, saying which filters will be
404 /// applied at this node (a 1-bit means the filter will be applied here; if
405 /// there are multiple ones, they are ANDed together).
406 ///
407 /// This is used during join optimization only; before iterators are
408 /// created, we will add FILTER access paths to represent these instead,
409 /// removing the dependency on the array. Said FILTER paths are by
410 /// convention created with materialize_subqueries = false, since the by far
411 /// most common case is that there are no subqueries in the predicate.
412 /// In other words, if you wish to represent a filter with
413 /// materialize_subqueries = true, you will need to make an explicit FILTER
414 /// node.
415 ///
416 /// See also nested_loop_join().equijoin_predicates, which is for filters
417 /// being applied _before_ nested-loop joins, but is otherwise the same idea.
418 OverflowBitset filter_predicates{0};
419
420 /// Bitmap of sargable join predicates that have already been applied
421 /// in this access path by means of an index lookup (ref access),
422 /// again referring to “predicates”, and thus should not be counted again
423 /// for selectivity. Note that the filter may need to be applied
424 /// nevertheless (especially in case of type conversions); see
425 /// subsumed_sargable_join_predicates.
426 ///
427 /// Since these refer to the same array as filter_predicates, they will
428 /// never overlap with filter_predicates, and so we can reuse the same
429 /// memory using an alias (a union would not be allowed, since OverflowBitset
430 /// is a class with non-trivial default constructor), even though the meaning
431 /// is entirely separate. If N = num_where_predicates in the hypergraph, then
432 /// bits 0..(N-1) belong to filter_predicates, and the rest to
433 /// applied_sargable_join_predicates.
434 OverflowBitset &applied_sargable_join_predicates() {
435 return filter_predicates;
436 }
437 const OverflowBitset &applied_sargable_join_predicates() const {
438 return filter_predicates;
439 }
440
441 /// Bitmap of WHERE predicates that touch tables we have joined in,
442 /// but that we could not apply yet (for instance because they reference
443 /// other tables, or because we could not push them down into
444 /// the nullable side of outer joins). Used during planning only
445 /// (see filter_predicates).
446 OverflowBitset delayed_predicates{0};
447
448 /// Similar to applied_sargable_join_predicates, bitmap of sargable
449 /// join predicates that have been applied and will subsume the join
450 /// predicate entirely, ie., not only should the selectivity not be
451 /// double-counted, but the predicate itself is redundant and need not
452 /// be applied as a filter. (It is an error to have a bit set here but not
453 /// in applied_sargable_join_predicates.)
454 OverflowBitset &subsumed_sargable_join_predicates() {
455 return delayed_predicates;
456 }
457 const OverflowBitset &subsumed_sargable_join_predicates() const {
458 return delayed_predicates;
459 }
460
461 /// If nonzero, a bitmap of other tables whose joined-in rows must already be
462 /// loaded when rows from this access path are evaluated; that is, this
463 /// access path must be put on the inner side of a nested-loop join (or
464 /// multiple such joins) where the outer side includes all of the given
465 /// tables.
466 ///
467 /// The most obvious case for this is dependent tables in LATERAL, but a more
468 /// common case is when we have pushed join conditions referring to those
469 /// tables; e.g., if this access path represents t1 and we have a condition
470 /// t1.x=t2.x that is pushed down into an index lookup (ref access), t2 will
471 /// be set in this bitmap. We can still join in other tables, deferring t2,
472 /// but the bit(s) will then propagate, and we cannot be on the right side of
473 /// a hash join until parameter_tables is zero again. (Also see
474 /// DisallowParameterizedJoinPath() for when we disallow such deferring,
475 /// as an optimization.)
476 ///
477 /// As a special case, we allow setting RAND_TABLE_BIT, even though it
478 /// is normally part of a table_map, not a NodeMap. In this case, it specifies
479 /// that the access path is entirely noncachable, because it depends on
480 /// something nondeterministic or an outer reference, and thus can never be on
481 /// the right side of a hash join, ever.
482 hypergraph::NodeMap parameter_tables{0};
483
484 /// Auxiliary data used by a secondary storage engine while processing the
485 /// access path during optimization and execution. The secondary storage
486 /// engine is free to store any useful information in this member, for example
487 /// extra statistics or cost estimates. The data pointed to is fully owned by
488 /// the secondary storage engine, and it is the responsibility of the
489 /// secondary engine to manage the memory and make sure it is properly
490 /// destroyed.
491 void *secondary_engine_data{nullptr};
492
493 // Accessors for the union below.
494 auto &table_scan() {
495 assert(type == TABLE_SCAN);
496 return u.table_scan;
497 }
498 const auto &table_scan() const {
499 assert(type == TABLE_SCAN);
500 return u.table_scan;
501 }
502 auto &index_scan() {
503 assert(type == INDEX_SCAN);
504 return u.index_scan;
505 }
506 const auto &index_scan() const {
507 assert(type == INDEX_SCAN);
508 return u.index_scan;
509 }
510 auto &ref() {
511 assert(type == REF);
512 return u.ref;
513 }
514 const auto &ref() const {
515 assert(type == REF);
516 return u.ref;
517 }
518 auto &ref_or_null() {
519 assert(type == REF_OR_NULL);
520 return u.ref_or_null;
521 }
522 const auto &ref_or_null() const {
523 assert(type == REF_OR_NULL);
524 return u.ref_or_null;
525 }
526 auto &eq_ref() {
527 assert(type == EQ_REF);
528 return u.eq_ref;
529 }
530 const auto &eq_ref() const {
531 assert(type == EQ_REF);
532 return u.eq_ref;
533 }
534 auto &pushed_join_ref() {
535 assert(type == PUSHED_JOIN_REF);
536 return u.pushed_join_ref;
537 }
538 const auto &pushed_join_ref() const {
539 assert(type == PUSHED_JOIN_REF);
540 return u.pushed_join_ref;
541 }
542 auto &full_text_search() {
543 assert(type == FULL_TEXT_SEARCH);
544 return u.full_text_search;
545 }
546 const auto &full_text_search() const {
547 assert(type == FULL_TEXT_SEARCH);
548 return u.full_text_search;
549 }
550 auto &const_table() {
551 assert(type == CONST_TABLE);
552 return u.const_table;
553 }
554 const auto &const_table() const {
555 assert(type == CONST_TABLE);
556 return u.const_table;
557 }
558 auto &mrr() {
559 assert(type == MRR);
560 return u.mrr;
561 }
562 const auto &mrr() const {
563 assert(type == MRR);
564 return u.mrr;
565 }
566 auto &follow_tail() {
567 assert(type == FOLLOW_TAIL);
568 return u.follow_tail;
569 }
570 const auto &follow_tail() const {
571 assert(type == FOLLOW_TAIL);
572 return u.follow_tail;
573 }
574 auto &index_range_scan() {
575 assert(type == INDEX_RANGE_SCAN);
576 return u.index_range_scan;
577 }
578 const auto &index_range_scan() const {
579 assert(type == INDEX_RANGE_SCAN);
580 return u.index_range_scan;
581 }
582 auto &index_merge() {
583 assert(type == INDEX_MERGE);
584 return u.index_merge;
585 }
586 const auto &index_merge() const {
587 assert(type == INDEX_MERGE);
588 return u.index_merge;
589 }
590 auto &rowid_intersection() {
591 assert(type == ROWID_INTERSECTION);
592 return u.rowid_intersection;
593 }
594 const auto &rowid_intersection() const {
595 assert(type == ROWID_INTERSECTION);
596 return u.rowid_intersection;
597 }
598 auto &rowid_union() {
599 assert(type == ROWID_UNION);
600 return u.rowid_union;
601 }
602 const auto &rowid_union() const {
603 assert(type == ROWID_UNION);
604 return u.rowid_union;
605 }
606 auto &index_skip_scan() {
607 assert(type == INDEX_SKIP_SCAN);
608 return u.index_skip_scan;
609 }
610 const auto &index_skip_scan() const {
611 assert(type == INDEX_SKIP_SCAN);
612 return u.index_skip_scan;
613 }
614 auto &group_index_skip_scan() {
615 assert(type == GROUP_INDEX_SKIP_SCAN);
616 return u.group_index_skip_scan;
617 }
618 const auto &group_index_skip_scan() const {
619 assert(type == GROUP_INDEX_SKIP_SCAN);
620 return u.group_index_skip_scan;
621 }
622 auto &dynamic_index_range_scan() {
623 assert(type == DYNAMIC_INDEX_RANGE_SCAN);
624 return u.dynamic_index_range_scan;
625 }
626 const auto &dynamic_index_range_scan() const {
627 assert(type == DYNAMIC_INDEX_RANGE_SCAN);
628 return u.dynamic_index_range_scan;
629 }
630 auto &materialized_table_function() {
631 assert(type == MATERIALIZED_TABLE_FUNCTION);
632 return u.materialized_table_function;
633 }
634 const auto &materialized_table_function() const {
635 assert(type == MATERIALIZED_TABLE_FUNCTION);
636 return u.materialized_table_function;
637 }
638 auto &unqualified_count() {
639 assert(type == UNQUALIFIED_COUNT);
640 return u.unqualified_count;
641 }
642 const auto &unqualified_count() const {
643 assert(type == UNQUALIFIED_COUNT);
644 return u.unqualified_count;
645 }
646 auto &table_value_constructor() {
647 assert(type == TABLE_VALUE_CONSTRUCTOR);
648 return u.table_value_constructor;
649 }
650 const auto &table_value_constructor() const {
651 assert(type == TABLE_VALUE_CONSTRUCTOR);
652 return u.table_value_constructor;
653 }
654 auto &fake_single_row() {
655 assert(type == FAKE_SINGLE_ROW);
656 return u.fake_single_row;
657 }
658 const auto &fake_single_row() const {
659 assert(type == FAKE_SINGLE_ROW);
660 return u.fake_single_row;
661 }
662 auto &zero_rows() {
663 assert(type == ZERO_ROWS);
664 return u.zero_rows;
665 }
666 const auto &zero_rows() const {
667 assert(type == ZERO_ROWS);
668 return u.zero_rows;
669 }
670 auto &zero_rows_aggregated() {
671 assert(type == ZERO_ROWS_AGGREGATED);
672 return u.zero_rows_aggregated;
673 }
674 const auto &zero_rows_aggregated() const {
675 assert(type == ZERO_ROWS_AGGREGATED);
676 return u.zero_rows_aggregated;
677 }
678 auto &hash_join() {
679 assert(type == HASH_JOIN);
680 return u.hash_join;
681 }
682 const auto &hash_join() const {
683 assert(type == HASH_JOIN);
684 return u.hash_join;
685 }
686 auto &bka_join() {
687 assert(type == BKA_JOIN);
688 return u.bka_join;
689 }
690 const auto &bka_join() const {
691 assert(type == BKA_JOIN);
692 return u.bka_join;
693 }
694 auto &nested_loop_join() {
695 assert(type == NESTED_LOOP_JOIN);
696 return u.nested_loop_join;
697 }
698 const auto &nested_loop_join() const {
699 assert(type == NESTED_LOOP_JOIN);
700 return u.nested_loop_join;
701 }
702 auto &nested_loop_semijoin_with_duplicate_removal() {
703 assert(type == NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL);
704 return u.nested_loop_semijoin_with_duplicate_removal;
705 }
706 const auto &nested_loop_semijoin_with_duplicate_removal() const {
707 assert(type == NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL);
708 return u.nested_loop_semijoin_with_duplicate_removal;
709 }
710 auto &filter() {
711 assert(type == FILTER);
712 return u.filter;
713 }
714 const auto &filter() const {
715 assert(type == FILTER);
716 return u.filter;
717 }
718 auto &sort() {
719 assert(type == SORT);
720 return u.sort;
721 }
722 const auto &sort() const {
723 assert(type == SORT);
724 return u.sort;
725 }
726 auto &aggregate() {
727 assert(type == AGGREGATE);
728 return u.aggregate;
729 }
730 const auto &aggregate() const {
731 assert(type == AGGREGATE);
732 return u.aggregate;
733 }
734 auto &temptable_aggregate() {
735 assert(type == TEMPTABLE_AGGREGATE);
736 return u.temptable_aggregate;
737 }
738 const auto &temptable_aggregate() const {
739 assert(type == TEMPTABLE_AGGREGATE);
740 return u.temptable_aggregate;
741 }
742 auto &limit_offset() {
743 assert(type == LIMIT_OFFSET);
744 return u.limit_offset;
745 }
746 const auto &limit_offset() const {
747 assert(type == LIMIT_OFFSET);
748 return u.limit_offset;
749 }
750 auto &stream() {
751 assert(type == STREAM);
752 return u.stream;
753 }
754 const auto &stream() const {
755 assert(type == STREAM);
756 return u.stream;
757 }
758 auto &materialize() {
759 assert(type == MATERIALIZE);
760 return u.materialize;
761 }
762 const auto &materialize() const {
763 assert(type == MATERIALIZE);
764 return u.materialize;
765 }
766 auto &materialize_information_schema_table() {
767 assert(type == MATERIALIZE_INFORMATION_SCHEMA_TABLE);
768 return u.materialize_information_schema_table;
769 }
770 const auto &materialize_information_schema_table() const {
771 assert(type == MATERIALIZE_INFORMATION_SCHEMA_TABLE);
772 return u.materialize_information_schema_table;
773 }
774 auto &append() {
775 assert(type == APPEND);
776 return u.append;
777 }
778 const auto &append() const {
779 assert(type == APPEND);
780 return u.append;
781 }
782 auto &window() {
783 assert(type == WINDOW);
784 return u.window;
785 }
786 const auto &window() const {
787 assert(type == WINDOW);
788 return u.window;
789 }
790 auto &weedout() {
791 assert(type == WEEDOUT);
792 return u.weedout;
793 }
794 const auto &weedout() const {
795 assert(type == WEEDOUT);
796 return u.weedout;
797 }
798 auto &remove_duplicates() {
799 assert(type == REMOVE_DUPLICATES);
800 return u.remove_duplicates;
801 }
802 const auto &remove_duplicates() const {
803 assert(type == REMOVE_DUPLICATES);
804 return u.remove_duplicates;
805 }
806 auto &remove_duplicates_on_index() {
807 assert(type == REMOVE_DUPLICATES_ON_INDEX);
808 return u.remove_duplicates_on_index;
809 }
810 const auto &remove_duplicates_on_index() const {
811 assert(type == REMOVE_DUPLICATES_ON_INDEX);
812 return u.remove_duplicates_on_index;
813 }
814 auto &alternative() {
815 assert(type == ALTERNATIVE);
816 return u.alternative;
817 }
818 const auto &alternative() const {
819 assert(type == ALTERNATIVE);
820 return u.alternative;
821 }
822 auto &cache_invalidator() {
823 assert(type == CACHE_INVALIDATOR);
824 return u.cache_invalidator;
825 }
826 const auto &cache_invalidator() const {
827 assert(type == CACHE_INVALIDATOR);
828 return u.cache_invalidator;
829 }
830 auto &delete_rows() {
831 assert(type == DELETE_ROWS);
832 return u.delete_rows;
833 }
834 const auto &delete_rows() const {
835 assert(type == DELETE_ROWS);
836 return u.delete_rows;
837 }
838 auto &update_rows() {
839 assert(type == UPDATE_ROWS);
840 return u.update_rows;
841 }
842 const auto &update_rows() const {
843 assert(type == UPDATE_ROWS);
844 return u.update_rows;
845 }
846
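// Typical consumer pattern (sketch; Walk() and HandleTable() are
// hypothetical): switch on "type", then use the matching type-checked
// accessor, which asserts that the right union member is active:
//
//   switch (path->type) {
//     case AccessPath::TABLE_SCAN:
//       HandleTable(path->table_scan().table);
//       break;
//     case AccessPath::HASH_JOIN:
//       Walk(path->hash_join().outer);
//       Walk(path->hash_join().inner);
//       break;
//     default:
//       break;
//   }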
847 double num_output_rows() const { return m_num_output_rows; }
848
849 void set_num_output_rows(double val) { m_num_output_rows = val; }
850
851 private:
852 /// Expected number of output rows, -1.0 for unknown.
853 double m_num_output_rows{kUnknownRowCount};
854
855 // We'd prefer if this could be an std::variant, but we don't have C++17 yet.
856 // It is private to force all access to be through the type-checking
857 // accessors.
858 //
859 // For information about the meaning of each value, see the corresponding
860 // row iterator constructors.
861 union {
862 struct {
863 TABLE *table;
864 } table_scan;
865 struct {
866 TABLE *table;
867 int idx;
868 bool use_order;
869 bool reverse;
870 } index_scan;
871 struct {
872 TABLE *table;
873 Index_lookup *ref;
874 bool use_order;
875 bool reverse;
876 } ref;
877 struct {
878 TABLE *table;
879 Index_lookup *ref;
880 bool use_order;
881 } ref_or_null;
882 struct {
883 TABLE *table;
884 Index_lookup *ref;
885 } eq_ref;
886 struct {
887 TABLE *table;
888 Index_lookup *ref;
889 bool use_order;
890 bool is_unique;
891 } pushed_join_ref;
892 struct {
893 TABLE *table;
894 Index_lookup *ref;
895 bool use_order;
896 bool use_limit;
897 Item_func_match *ft_func;
898 } full_text_search;
899 struct {
900 TABLE *table;
901 Index_lookup *ref;
902 } const_table;
903 struct {
904 TABLE *table;
910 struct {
911 TABLE *table;
912 } follow_tail;
913 struct {
914 // The key part(s) we are scanning on. Note that this may be an array.
915 // You can get the table we are working on by looking into
916 // used_key_parts[0].field->table (it is not stored directly, to avoid
917 // going over the AccessPath size limits).
919
920 // The actual ranges we are scanning over (originally derived from “key”).
921 // Not a Bounds_checked_array, to save 4 bytes on the length.
922 QUICK_RANGE **ranges;
923 unsigned num_ranges;
924
925 unsigned mrr_flags;
926 unsigned mrr_buf_size;
927
928 // Which index (in the TABLE) we are scanning over, and how many of its
929 // key parts we are using.
930 unsigned index;
931 unsigned num_used_key_parts;
932
933 // If true, the scan can return rows in rowid order.
934 bool can_be_used_for_ror : 1;
935
936 // If true, the scan _should_ return rows in rowid order.
937 // Should only be set if can_be_used_for_ror == true.
938 bool need_rows_in_rowid_order : 1;
939
940 // If true, this plan can be used for index merge scan.
941 bool can_be_used_for_imerge_scan : 1;
942
943 // See row intersection for more details.
944 bool reuse_handler : 1;
945
946 // Whether we are scanning over a geometry key part.
947 bool geometry : 1;
948
949 // Whether we need a reverse scan. Only supported if geometry == false.
950 bool reverse : 1;
951
952 // For a reverse scan, if we are using extended key parts. It is needed
953 // to set correct flags when retrieving records.
954 bool using_extended_key_parts : 1;
955 } index_range_scan;
956 struct {
957 TABLE *table;
958 bool forced_by_hint;
959 bool allow_clustered_primary_key_scan;
960 Mem_root_array<AccessPath *> *children;
961 } index_merge;
962 struct {
963 TABLE *table;
964 Mem_root_array<AccessPath *> *children;
965
966 // Clustered primary key scan, if any.
967 AccessPath *cpk_child;
968
969 bool forced_by_hint;
970 bool retrieve_full_rows;
971 bool need_rows_in_rowid_order;
972
973 // If true, the first child scan should reuse table->file instead of
974 // creating its own. This is true if the intersection is the topmost
975 // range scan, but _not_ if it's below a union. (The reasons for this
976 // are unknown.) It can also be negated by logic involving
977 // retrieve_full_rows and is_covering, again for unknown reasons.
978 //
979 // This is not only for performance; multi-table delete has a hidden
980 // dependency on this behavior when running against certain types of
981 // tables (e.g. MyISAM), as it assumes table->file is correctly positioned
982 // when deleting (and not all table types can transfer the position of one
983 // handler to another by using position()).
984 bool reuse_handler;
985
986 // true if no row retrieval phase is necessary.
987 bool is_covering;
988 } rowid_intersection;
989 struct {
990 TABLE *table;
991 Mem_root_array<AccessPath *> *children;
992 bool forced_by_hint;
993 } rowid_union;
994 struct {
995 TABLE *table;
996 unsigned index;
997 unsigned num_used_key_parts;
998 bool forced_by_hint;
999
1000 // Large, and has nontrivial destructors, so split out into
1001 // its own allocation.
1002 IndexSkipScanParameters *param;
1003 } index_skip_scan;
1004 struct {
1005 TABLE *table;
1006 unsigned index;
1007 unsigned num_used_key_parts;
1008 bool forced_by_hint;
1009
1010 // Large, so split out into its own allocation.
1011 GroupIndexSkipScanParameters *param;
1012 } group_index_skip_scan;
1013 struct {
1014 TABLE *table;
1015 QEP_TAB *qep_tab; // Used only for buffering.
1016 } dynamic_index_range_scan;
1017 struct {
1018 TABLE *table;
1019 Table_function *table_function;
1020 AccessPath *table_path;
1021 } materialized_table_function;
1022 struct {
1023 } unqualified_count;
1024
1025 struct {
1026 // No members (implicit from the JOIN).
1027 } table_value_constructor;
1028 struct {
1029 // No members.
1030 } fake_single_row;
1031 struct {
1032 // The child is optional. It is only used for keeping track of which
1033 // tables are pruned away by this path, and it is only needed when this
1034 // path is on the inner side of an outer join. See ZeroRowsIterator for
1035 // details. The child of a ZERO_ROWS access path will not be visited by
1036 // WalkAccessPaths(). It will be visited by WalkTablesUnderAccessPath()
1037 // only if called with include_pruned_tables = true. No iterator is
1038 // created for the child, and the child is not shown by EXPLAIN.
1039 AccessPath *child;
1040 // Used for EXPLAIN only.
1041 // TODO(sgunders): make an enum.
1042 const char *cause;
1043 } zero_rows;
1044 struct {
1045 // Used for EXPLAIN only.
1046 // TODO(sgunders): make an enum.
1047 const char *cause;
1048 } zero_rows_aggregated;
1049
1050 struct {
1054 bool store_rowids; // Whether we are below a weedout or not.
1058 struct {
1063 bool store_rowids; // Whether we are below a weedout or not.
1066 struct {
1067 AccessPath *outer, *inner;
1068 JoinType join_type; // Somewhat redundant wrt. join_predicate.
1069 bool pfs_batch_mode;
1070 bool already_expanded_predicates;
1071 const JoinPredicate *join_predicate;
1072
1073 // Equijoin filters to apply before the join, if any.
1074 // Indexes into join_predicate->expr->equijoin_conditions.
1075 // Non-equijoin conditions are always applied.
1076 // If already_expanded_predicates is true, do not re-expand.
1077 OverflowBitset equijoin_predicates;
1078
1079 // NOTE: Due to the nontrivial constructor on equijoin_predicates,
1080 // this struct needs an initializer, or the union would not be
1081 // default-constructible. If we need more than one union member
1082 // with such an initializer, we would probably need to change
1083 // equijoin_predicates into a uint64_t type-punned to an OverflowBitset.
1084 } nested_loop_join = {nullptr, nullptr, JoinType::INNER, false, false,
1085 nullptr, {}};
1086 struct {
1087 AccessPath *outer, *inner;
1088 const TABLE *table;
1089 KEY *key;
1090 size_t key_len;
1091 } nested_loop_semijoin_with_duplicate_removal;
1092
1093 struct {
1094 AccessPath *child;
1095 Item *condition;
1096
1097 // This parameter, unlike nearly all others, is not passed to the
1098 // actual iterator. Instead, if true, it signifies that when creating
1099 // the iterator, all materializable subqueries in “condition” should be
1100 // materialized (with any in2exists condition removed first). In the
1101 // very rare case that there are two or more such subqueries, this is
1102 // an all-or-nothing decision, for simplicity.
1103 //
1104 // See FinalizeMaterializedSubqueries().
1105 bool materialize_subqueries;
1106 } filter;
1107 struct {
1108 AccessPath *child;
1109 Filesort *filesort;
1110 table_map tables_to_get_rowid_for;
1111
1112 // If filesort is nullptr: A new filesort will be created at the
1113 // end of optimization, using this order and flags. Otherwise: Only
1114 // used by EXPLAIN.
1115 ORDER *order;
1116 ha_rows limit;
1117 bool remove_duplicates;
1118 bool unwrap_rollup;
1119 bool force_sort_rowids;
1120 } sort;
1121 struct {
1122 AccessPath *child;
1123 bool rollup;
1124 } aggregate;
1125 struct {
1126 AccessPath *subquery_path;
1127 Temp_table_param *temp_table_param;
1128 TABLE *table;
1129 AccessPath *table_path;
1130 int ref_slice;
1131 } temptable_aggregate;
1132 struct {
1133 AccessPath *child;
1134 ha_rows limit;
1135 ha_rows offset;
1136 bool count_all_rows;
1137 bool reject_multiple_rows;
1138 // Only used when the LIMIT is on a UNION with SQL_CALC_FOUND_ROWS.
1139 // See Query_expression::send_records.
1140 ha_rows *send_records_override;
1141 } limit_offset;
1142 struct {
1143 AccessPath *child;
1144 JOIN *join;
1145 Temp_table_param *temp_table_param;
1146 TABLE *table;
1147 bool provide_rowid;
1148 int ref_slice;
1149 } stream;
1150 struct {
1151 // NOTE: The only legal access paths within table_path are
1152 // TABLE_SCAN, REF, REF_OR_NULL, EQ_REF, ALTERNATIVE,
1153 // CONST_TABLE (somewhat nonsensical), INDEX_SCAN and DYNAMIC_INDEX_RANGE_SCAN.
1154 AccessPath *table_path;
1155
1156 // Large, and has nontrivial destructors, so split out
1157 // into its own allocation.
1158 MaterializePathParameters *param;
1159 /** The total cost of executing the queries that we materialize.*/
1160 double subquery_cost;
1161 } materialize;
1162 struct {
1163 AccessPath *table_path;
1164 Table_ref *table_list;
1165 Item *condition;
1166 } materialize_information_schema_table;
1167 struct {
1168 Mem_root_array<AppendPathParameters> *children;
1169 } append;
1170 struct {
1171 AccessPath *child;
1172 Window *window;
1173 TABLE *temp_table;
1174 Temp_table_param *temp_table_param;
1175 int ref_slice;
1176 bool needs_buffering;
1177 } window;
1178 struct {
1179 AccessPath *child;
1180 SJ_TMP_TABLE *weedout_table;
1181 table_map tables_to_get_rowid_for;
1182 } weedout;
1183 struct {
1184 AccessPath *child;
1185 Item **group_items;
1186 int group_items_size;
1187 } remove_duplicates;
1188 struct {
1189 AccessPath *child;
1190 TABLE *table;
1191 KEY *key;
1192 unsigned loosescan_key_len;
1193 } remove_duplicates_on_index;
1194 struct {
1195 AccessPath *table_scan_path;
1196
1197 // For the ref.
1198 AccessPath *child;
1199 Index_lookup *used_ref;
1200 } alternative;
1201 struct {
1202 AccessPath *child;
1203 const char *name;
1204 } cache_invalidator;
1205 struct {
1206 AccessPath *child;
1207 table_map tables_to_delete_from;
1208 table_map immediate_tables;
1209 } delete_rows;
1210 struct {
1211 AccessPath *child;
1212 table_map tables_to_update;
1213 table_map immediate_tables;
1214 } update_rows;
1215 } u;
1216};
1217static_assert(std::is_trivially_destructible<AccessPath>::value,
1218 "AccessPath must be trivially destructible, as it is allocated "
1219 "on the MEM_ROOT and not wrapped in unique_ptr_destroy_only"
1220 "(because multiple candidates during planning could point to "
1221 "the same access paths, and refcounting would be expensive)");
1222static_assert(sizeof(AccessPath) <= 144,
1223 "We are creating a lot of access paths in the join "
1224 "optimizer, so be sure not to bloat it without noticing. "
1225 "(96 bytes for the base, 48 bytes for the variant.)");
1226
1227inline void CopyBasicProperties(const AccessPath &from, AccessPath *to) {
1228 to->set_num_output_rows(from.num_output_rows());
1229 to->cost = from.cost;
1230 to->init_cost = from.init_cost;
1231 to->init_once_cost = from.init_once_cost;
1232 to->parameter_tables = from.parameter_tables;
1233 to->safe_for_rowid = from.safe_for_rowid;
1234 to->ordering_state = from.ordering_state;
1235}
1236
1237// Trivial factory functions for all of the types of access paths above.
1238
1239inline AccessPath *NewTableScanAccessPath(THD *thd, TABLE *table,
1240 bool count_examined_rows) {
1241 AccessPath *path = new (thd->mem_root) AccessPath;
1242 path->type = AccessPath::TABLE_SCAN;
1243 path->count_examined_rows = count_examined_rows;
1244 path->table_scan().table = table;
1245 return path;
1246}
1247
1248inline AccessPath *NewIndexScanAccessPath(THD *thd, TABLE *table, int idx,
1249 bool use_order, bool reverse,
1250 bool count_examined_rows) {
1251 AccessPath *path = new (thd->mem_root) AccessPath;
1252 path->type = AccessPath::INDEX_SCAN;
1253 path->count_examined_rows = count_examined_rows;
1254 path->index_scan().table = table;
1255 path->index_scan().idx = idx;
1256 path->index_scan().use_order = use_order;
1257 path->index_scan().reverse = reverse;
1258 return path;
1259}
1260
1261inline AccessPath *NewRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref,
1262 bool use_order, bool reverse,
1263 bool count_examined_rows) {
1264 AccessPath *path = new (thd->mem_root) AccessPath;
1265 path->type = AccessPath::REF;
1266 path->count_examined_rows = count_examined_rows;
1267 path->ref().table = table;
1268 path->ref().ref = ref;
1269 path->ref().use_order = use_order;
1270 path->ref().reverse = reverse;
1271 return path;
1272}
1273
1274inline AccessPath *NewRefOrNullAccessPath(THD *thd, TABLE *table,
1275 Index_lookup *ref, bool use_order,
1276 bool count_examined_rows) {
1277 AccessPath *path = new (thd->mem_root) AccessPath;
1278 path->type = AccessPath::REF_OR_NULL;
1279 path->count_examined_rows = count_examined_rows;
1280 path->ref_or_null().table = table;
1281 path->ref_or_null().ref = ref;
1282 path->ref_or_null().use_order = use_order;
1283 return path;
1284}
1285
1286inline AccessPath *NewEQRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref,
1287 bool count_examined_rows) {
1288 AccessPath *path = new (thd->mem_root) AccessPath;
1289 path->type = AccessPath::EQ_REF;
1290 path->count_examined_rows = count_examined_rows;
1291 path->eq_ref().table = table;
1292 path->eq_ref().ref = ref;
1293 return path;
1294}
1295
1296inline AccessPath *NewPushedJoinRefAccessPath(THD *thd, TABLE *table,
1297 Index_lookup *ref, bool use_order,
1298 bool is_unique,
1299 bool count_examined_rows) {
1300 AccessPath *path = new (thd->mem_root) AccessPath;
1301 path->type = AccessPath::PUSHED_JOIN_REF;
1302 path->count_examined_rows = count_examined_rows;
1303 path->pushed_join_ref().table = table;
1304 path->pushed_join_ref().ref = ref;
1305 path->pushed_join_ref().use_order = use_order;
1306 path->pushed_join_ref().is_unique = is_unique;
1307 return path;
1308}
1309
1310inline AccessPath *NewFullTextSearchAccessPath(THD *thd, TABLE *table,
1311 Index_lookup *ref,
1312 Item_func_match *ft_func,
1313 bool use_order, bool use_limit,
1314 bool count_examined_rows) {
1315 AccessPath *path = new (thd->mem_root) AccessPath;
1316 path->type = AccessPath::FULL_TEXT_SEARCH;
1317 path->count_examined_rows = count_examined_rows;
1318 path->full_text_search().table = table;
1319 path->full_text_search().ref = ref;
1320 path->full_text_search().use_order = use_order;
1321 path->full_text_search().use_limit = use_limit;
1322 path->full_text_search().ft_func = ft_func;
1323 return path;
1324}
1325
1326inline AccessPath *NewConstTableAccessPath(THD *thd, TABLE *table,
1327 Index_lookup *ref,
1328 bool count_examined_rows) {
1329 AccessPath *path = new (thd->mem_root) AccessPath;
1330 path->type = AccessPath::CONST_TABLE;
1331 path->count_examined_rows = count_examined_rows;
1332 path->set_num_output_rows(1.0);
1333 path->cost = 0.0;
1334 path->init_cost = 0.0;
1335 path->init_once_cost = 0.0;
1336 path->const_table().table = table;
1337 path->const_table().ref = ref;
1338 return path;
1339}
1340
1341inline AccessPath *NewMRRAccessPath(THD *thd, TABLE *table, Index_lookup *ref,
1342 int mrr_flags) {
1343 AccessPath *path = new (thd->mem_root) AccessPath;
1344 path->type = AccessPath::MRR;
1345 path->mrr().table = table;
1346 path->mrr().ref = ref;
1347 path->mrr().mrr_flags = mrr_flags;
1348
1349 // This will be filled in when the BKA iterator is created.
1350 path->mrr().bka_path = nullptr;
1351
1352 return path;
1353}
1354
1355inline AccessPath *NewFollowTailAccessPath(THD *thd, TABLE *table,
1356 bool count_examined_rows) {
1357 AccessPath *path = new (thd->mem_root) AccessPath;
1358 path->type = AccessPath::FOLLOW_TAIL;
1359 path->count_examined_rows = count_examined_rows;
1360 path->follow_tail().table = table;
1361 return path;
1362}
1363
1364inline AccessPath *NewDynamicIndexRangeScanAccessPath(
1365 THD *thd, TABLE *table, QEP_TAB *qep_tab, bool count_examined_rows) {
1366 AccessPath *path = new (thd->mem_root) AccessPath;
1367 path->type = AccessPath::DYNAMIC_INDEX_RANGE_SCAN;
1368 path->count_examined_rows = count_examined_rows;
1369 path->dynamic_index_range_scan().table = table;
1370 path->dynamic_index_range_scan().qep_tab = qep_tab;
1371 return path;
1372}
1373
1374inline AccessPath *NewMaterializedTableFunctionAccessPath(
1375 THD *thd, TABLE *table, Table_function *table_function,
1376 AccessPath *table_path) {
1377 AccessPath *path = new (thd->mem_root) AccessPath;
1378 path->type = AccessPath::MATERIALIZED_TABLE_FUNCTION;
1379 path->materialized_table_function().table = table;
1380 path->materialized_table_function().table_function = table_function;
1381 path->materialized_table_function().table_path = table_path;
1382 return path;
1383}
1384
1385inline AccessPath *NewUnqualifiedCountAccessPath(THD *thd) {
1386 AccessPath *path = new (thd->mem_root) AccessPath;
1387 path->type = AccessPath::UNQUALIFIED_COUNT;
1388 return path;
1389}
1390
1391inline AccessPath *NewTableValueConstructorAccessPath(THD *thd) {
1392 AccessPath *path = new (thd->mem_root) AccessPath;
1393 path->type = AccessPath::TABLE_VALUE_CONSTRUCTOR;
1394 // The iterator keeps track of which row it is at in examined_rows,
1395 // so we always need to give it the pointer.
1396 path->count_examined_rows = true;
1397 return path;
1398}
1399
1400inline AccessPath *NewNestedLoopSemiJoinWithDuplicateRemovalAccessPath(
1401 THD *thd, AccessPath *outer, AccessPath *inner, const TABLE *table,
1402 KEY *key, size_t key_len) {
1403 AccessPath *path = new (thd->mem_root) AccessPath;
1404 path->type = AccessPath::NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL;
1405 path->nested_loop_semijoin_with_duplicate_removal().outer = outer;
1406 path->nested_loop_semijoin_with_duplicate_removal().inner = inner;
1407 path->nested_loop_semijoin_with_duplicate_removal().table = table;
1408 path->nested_loop_semijoin_with_duplicate_removal().key = key;
1409 path->nested_loop_semijoin_with_duplicate_removal().key_len = key_len;
1410 return path;
1411}
1412
1413inline AccessPath *NewFilterAccessPath(THD *thd, AccessPath *child,
1414 Item *condition) {
1415 AccessPath *path = new (thd->mem_root) AccessPath;
1416 path->type = AccessPath::FILTER;
1417 path->filter().child = child;
1418 path->filter().condition = condition;
1419 path->filter().materialize_subqueries = false;
1420 return path;
1421}
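// Example usage (sketch; assumes a THD *thd, an opened TABLE *t1 and an
// Item *where_cond from the surrounding optimizer code):
//
//   AccessPath *scan =
//       NewTableScanAccessPath(thd, t1, /*count_examined_rows=*/true);
//   AccessPath *filtered = NewFilterAccessPath(thd, scan, where_cond);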
1422
1423// Not inline, because it needs access to filesort internals
1424// (which are forward-declared in this file).
1425AccessPath *NewSortAccessPath(THD *thd, AccessPath *child, Filesort *filesort,
1426 ORDER *order, bool count_examined_rows);
1427
1428inline AccessPath *NewAggregateAccessPath(THD *thd, AccessPath *child,
1429 bool rollup) {
1430 AccessPath *path = new (thd->mem_root) AccessPath;
1431 path->type = AccessPath::AGGREGATE;
1432 path->aggregate().child = child;
1433 path->aggregate().rollup = rollup;
1434 return path;
1435}
1436
1437inline AccessPath *NewTemptableAggregateAccessPath(
1438 THD *thd, AccessPath *subquery_path, Temp_table_param *temp_table_param,
1439 TABLE *table, AccessPath *table_path, int ref_slice) {
1440 AccessPath *path = new (thd->mem_root) AccessPath;
1441 path->type = AccessPath::TEMPTABLE_AGGREGATE;
1442 path->temptable_aggregate().subquery_path = subquery_path;
1443 path->temptable_aggregate().temp_table_param = temp_table_param;
1444 path->temptable_aggregate().table = table;
1445 path->temptable_aggregate().table_path = table_path;
1446 path->temptable_aggregate().ref_slice = ref_slice;
1447 return path;
1448}
1449
1450inline AccessPath *NewLimitOffsetAccessPath(THD *thd, AccessPath *child,
1451 ha_rows limit, ha_rows offset,
1452 bool count_all_rows,
1453 bool reject_multiple_rows,
1454 ha_rows *send_records_override) {
1456 AccessPath *path = new (thd->mem_root) AccessPath;
1457 path->type = AccessPath::LIMIT_OFFSET;
1458 path->immediate_update_delete_table = child->immediate_update_delete_table;
1459 path->limit_offset().child = child;
1460 path->limit_offset().limit = limit;
1461 path->limit_offset().offset = offset;
1462 path->limit_offset().count_all_rows = count_all_rows;
1463 path->limit_offset().reject_multiple_rows = reject_multiple_rows;
1464 path->limit_offset().send_records_override = send_records_override;
1465 path->ordering_state = child->ordering_state;
1467 return path;
1468}
1469
1470inline AccessPath *NewFakeSingleRowAccessPath(THD *thd,
1471 bool count_examined_rows) {
1472 AccessPath *path = new (thd->mem_root) AccessPath;
1473 path->type = AccessPath::FAKE_SINGLE_ROW;
1474 path->count_examined_rows = count_examined_rows;
1475 path->set_num_output_rows(1.0);
1476 path->cost = 0.0;
1477 path->init_cost = 0.0;
1478 path->init_once_cost = 0.0;
1479 return path;
1480}
1481
1482inline AccessPath *NewZeroRowsAccessPath(THD *thd, AccessPath *child,
1483 const char *cause) {
1484 AccessPath *path = new (thd->mem_root) AccessPath;
1485 path->type = AccessPath::ZERO_ROWS;
1486 path->zero_rows().child = child;
1487 path->zero_rows().cause = cause;
1488 path->set_num_output_rows(0.0);
1489 path->cost = 0.0;
1490 path->init_cost = 0.0;
1491 path->init_once_cost = 0.0;
1492 path->num_output_rows_before_filter = 0.0;
1493 path->cost_before_filter = 0.0;
1494 return path;
1495}
1496
1497inline AccessPath *NewZeroRowsAccessPath(THD *thd, const char *cause) {
1498 return NewZeroRowsAccessPath(thd, /*child=*/nullptr, cause);
1499}
1500
1501inline AccessPath *NewZeroRowsAggregatedAccessPath(THD *thd,
1502 const char *cause) {
1503 AccessPath *path = new (thd->mem_root) AccessPath;
1504 path->type = AccessPath::ZERO_ROWS_AGGREGATED;
1505 path->zero_rows_aggregated().cause = cause;
1506 path->set_num_output_rows(1.0);
1507 path->cost = 0.0;
1508 path->init_cost = 0.0;
1509 return path;
1510}
1511
1512inline AccessPath *NewStreamingAccessPath(THD *thd, AccessPath *child,
1513 JOIN *join,
1514 Temp_table_param *temp_table_param,
1515 TABLE *table, int ref_slice) {
1516 AccessPath *path = new (thd->mem_root) AccessPath;
1517 path->type = AccessPath::STREAM;
1518 path->stream().child = child;
1519 path->stream().join = join;
1520 path->stream().temp_table_param = temp_table_param;
1521 path->stream().table = table;
1522 path->stream().ref_slice = ref_slice;
1523 // Will be set later if we get a weedout access path as parent.
1524 path->stream().provide_rowid = false;
1525 return path;
1526}
1527
1528inline Mem_root_array<MaterializePathParameters::QueryBlock>
1529SingleMaterializeQueryBlock(THD *thd, AccessPath *path, int select_number,
1530 JOIN *join, bool copy_items,
1531 Temp_table_param *temp_table_param) {
1532 assert(path != nullptr);
1533 Mem_root_array<MaterializePathParameters::QueryBlock> array(thd->mem_root, 1);
1534 MaterializePathParameters::QueryBlock &query_block = array[0];
1535 query_block.subquery_path = path;
1536 query_block.select_number = select_number;
1537 query_block.join = join;
1538 query_block.disable_deduplication_by_hash_field = false;
1539 query_block.copy_items = copy_items;
1540 query_block.temp_table_param = temp_table_param;
1541 return array;
1542}
1543
1544inline AccessPath *NewMaterializeAccessPath(
1545 THD *thd,
1546 Mem_root_array<MaterializePathParameters::QueryBlock> query_blocks,
1547 Mem_root_array<const AccessPath *> *invalidators, TABLE *table,
1548 AccessPath *table_path, Common_table_expr *cte, Query_expression *unit,
1549 int ref_slice, bool rematerialize, ha_rows limit_rows,
1550 bool reject_multiple_rows) {
1551 MaterializePathParameters *param =
1552 new (thd->mem_root) MaterializePathParameters;
1553 param->query_blocks = std::move(query_blocks);
1554 if (rematerialize) {
1555 // There's no point in adding invalidators if we're rematerializing
1556 // every time anyway.
1557 param->invalidators = nullptr;
1558 } else {
1559 param->invalidators = invalidators;
1560 }
1561 param->table = table;
1562 param->cte = cte;
1563 param->unit = unit;
1564 param->ref_slice = ref_slice;
1565 param->rematerialize = rematerialize;
1566 param->limit_rows = (table == nullptr || table->is_union_or_table()
1567 ? limit_rows
1568 :
1569 // INTERSECT, EXCEPT: Enforced by TableScanIterator,
1570 // see its constructor
1571 HA_POS_ERROR);
1572 param->reject_multiple_rows = reject_multiple_rows;
1573
1574#ifndef NDEBUG
1575 for (MaterializePathParameters::QueryBlock &query_block :
1576 param->query_blocks) {
1577 assert(query_block.subquery_path != nullptr);
1578 }
1579#endif
1580
1581 AccessPath *path = new (thd->mem_root) AccessPath;
1582 path->type = AccessPath::MATERIALIZE;
1583 path->materialize().table_path = table_path;
1584 path->materialize().param = param;
1585 path->materialize().subquery_cost = -1.0;
1586 if (rematerialize) {
1587 path->safe_for_rowid = AccessPath::SAFE_IF_SCANNED_ONCE;
1588 } else {
1589 // The default; this is just to be explicit in the code.
1590 path->safe_for_rowid = AccessPath::SAFE;
1591 }
1592 return path;
1593}
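// Example usage (sketch; "subquery_path", "tmp_table", "table_path",
// "tmp_table_param" and "select_number" are assumed to come from the caller):
//
//   AccessPath *mat = NewMaterializeAccessPath(
//       thd,
//       SingleMaterializeQueryBlock(thd, subquery_path, select_number, join,
//                                   /*copy_items=*/true, tmp_table_param),
//       /*invalidators=*/nullptr, tmp_table, table_path, /*cte=*/nullptr,
//       /*unit=*/nullptr, /*ref_slice=*/-1, /*rematerialize=*/true,
//       /*limit_rows=*/HA_POS_ERROR, /*reject_multiple_rows=*/false);
//
// Note how rematerialize=true also downgrades safe_for_rowid to
// SAFE_IF_SCANNED_ONCE, as seen in the implementation above.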
1594
1595inline AccessPath *NewMaterializeInformationSchemaTableAccessPath(
1596 THD *thd, AccessPath *table_path, Table_ref *table_list, Item *condition) {
1597 AccessPath *path = new (thd->mem_root) AccessPath;
1598 path->type = AccessPath::MATERIALIZE_INFORMATION_SCHEMA_TABLE;
1599 path->materialize_information_schema_table().table_path = table_path;
1600 path->materialize_information_schema_table().table_list = table_list;
1601 path->materialize_information_schema_table().condition = condition;
1602 return path;
1603}
1604
1605// The Mem_root_array must be allocated on a MEM_ROOT that lives at least for as
1606// long as the access path.
1607inline AccessPath *NewAppendAccessPath(
1608 THD *thd, Mem_root_array<AppendPathParameters> *children) {
1609 AccessPath *path = new (thd->mem_root) AccessPath;
1610 path->type = AccessPath::APPEND;
1611 path->append().children = children;
1612 path->cost = 0.0;
1613 path->init_cost = 0.0;
1614 path->init_once_cost = 0.0;
1615 double num_output_rows = 0.0;
1616 for (const AppendPathParameters &child : *children) {
1617 path->cost += child.path->cost;
1618 path->init_cost += child.path->init_cost;
1619 path->init_once_cost += child.path->init_once_cost;
1620 num_output_rows += child.path->num_output_rows();
1621 }
1622 path->set_num_output_rows(num_output_rows);
1623 return path;
1624}
1625
1626inline AccessPath *NewWindowAccessPath(THD *thd, AccessPath *child,
1627 Window *window,
1628 Temp_table_param *temp_table_param,
1629 int ref_slice, bool needs_buffering) {
1630 AccessPath *path = new (thd->mem_root) AccessPath;
1631 path->type = AccessPath::WINDOW;
1632 path->window().child = child;
1633 path->window().window = window;
1634 path->window().temp_table = nullptr;
1635 path->window().temp_table_param = temp_table_param;
1636 path->window().ref_slice = ref_slice;
1637 path->window().needs_buffering = needs_buffering;
1638 return path;
1639}
1640
1641inline AccessPath *NewWeedoutAccessPath(THD *thd, AccessPath *child,
1642 SJ_TMP_TABLE *weedout_table) {
1643 AccessPath *path = new (thd->mem_root) AccessPath;
1644 path->type = AccessPath::WEEDOUT;
1645 path->weedout().child = child;
1646 path->weedout().weedout_table = weedout_table;
1647 path->weedout().tables_to_get_rowid_for =
1648 0; // Must be handled by the caller.
1649 return path;
1650}
1651
1652inline AccessPath *NewRemoveDuplicatesAccessPath(THD *thd, AccessPath *child,
1653 Item **group_items,
1654 int group_items_size) {
1655 AccessPath *path = new (thd->mem_root) AccessPath;
1656 path->type = AccessPath::REMOVE_DUPLICATES;
1657 path->remove_duplicates().child = child;
1658 path->remove_duplicates().group_items = group_items;
1659 path->remove_duplicates().group_items_size = group_items_size;
1660 return path;
1661}
1662
1663inline AccessPath *NewRemoveDuplicatesOnIndexAccessPath(
1664 THD *thd, AccessPath *child, TABLE *table, KEY *key,
1665 unsigned loosescan_key_len) {
1666 AccessPath *path = new (thd->mem_root) AccessPath;
1667 path->type = AccessPath::REMOVE_DUPLICATES_ON_INDEX;
1668 path->remove_duplicates_on_index().child = child;
1669 path->remove_duplicates_on_index().table = table;
1670 path->remove_duplicates_on_index().key = key;
1671 path->remove_duplicates_on_index().loosescan_key_len = loosescan_key_len;
1672 return path;
1673}
1674
1675inline AccessPath *NewAlternativeAccessPath(THD *thd, AccessPath *child,
1676 AccessPath *table_scan_path,
1677 Index_lookup *used_ref) {
1678 AccessPath *path = new (thd->mem_root) AccessPath;
1679 path->type = AccessPath::ALTERNATIVE;
1680 path->alternative().table_scan_path = table_scan_path;
1681 path->alternative().child = child;
1682 path->alternative().used_ref = used_ref;
1683 return path;
1684}
1685
1686inline AccessPath *NewInvalidatorAccessPath(THD *thd, AccessPath *child,
1687 const char *name) {
1688 AccessPath *path = new (thd->mem_root) AccessPath;
1689 path->type = AccessPath::CACHE_INVALIDATOR;
1690 path->cache_invalidator().child = child;
1691 path->cache_invalidator().name = name;
1692 return path;
1693}
1694
1695AccessPath *NewDeleteRowsAccessPath(THD *thd, AccessPath *child,
1696 table_map delete_tables,
1697 table_map immediate_tables);
1698
1699AccessPath *NewUpdateRowsAccessPath(THD *thd, AccessPath *child,
1700 table_map delete_tables,
1701 table_map immediate_tables);
1702
1703/**
1704 Modifies "path" and the paths below it so that they provide row IDs for
1705 all tables.
1706 */
1707void FindTablesToGetRowidFor(AccessPath *path);
1708
1709/**
1710 If the path is a FILTER path marked that subqueries are to be materialized,
1711 do so. If not, do nothing.
1712
1713 It is important that this is not called until the entire plan is ready;
1714 not just when planning a single query block. The reason is that a query
1715 block A with materializable subqueries may itself be part of a materializable
1716 subquery B, so if one calls this when planning A, the subqueries in A will
1717 irrevocably be materialized, even if that is not the optimal plan given B.
1718 Thus, this is done when creating iterators.
1719 */
1720bool FinalizeMaterializedSubqueries(THD *thd, JOIN *join, AccessPath *path);
1721
1722unique_ptr_destroy_only<RowIterator> CreateIteratorFromAccessPath(
1723 THD *thd, MEM_ROOT *mem_root, AccessPath *path, JOIN *join,
1724 bool eligible_for_batch_mode);
1725
1726// A short form of CreateIteratorFromAccessPath() that implicitly uses the THD's
1727// MEM_ROOT for storage, which is nearly always what you want. (The only caller
1728// that does anything else is DynamicRangeIterator.)
1729inline unique_ptr_destroy_only<RowIterator> CreateIteratorFromAccessPath(
1730 THD *thd, AccessPath *path, JOIN *join, bool eligible_for_batch_mode) {
1731 return CreateIteratorFromAccessPath(thd, thd->mem_root, path, join,
1732 eligible_for_batch_mode);
1733}
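// Example usage at the end of planning (sketch; "thd", "join" and
// "root_path" are assumed to exist):
//
//   unique_ptr_destroy_only<RowIterator> iterator =
//       CreateIteratorFromAccessPath(thd, root_path, join,
//                                    /*eligible_for_batch_mode=*/true);
//   if (iterator == nullptr || iterator->Init()) return true;  // Error.
//   for (;;) {
//     int err = iterator->Read();
//     if (err == -1) break;       // EOF.
//     if (err == 1) return true;  // Error.
//     // A row is now available in the tables' record buffers.
//   }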
1734
1735void SetCostOnTableAccessPath(const Cost_model_server &cost_model,
1736 const POSITION *pos, bool is_after_filter,
1737 AccessPath *path);
1738
1739/**
1740 Return the TABLE* referred from 'path' if it is a basic access path,
1741 else a nullptr is returned. Temporary tables, such as those used by
1742 sorting, aggregate and subquery materialization are not returned.
1743*/
1744TABLE *GetBasicTable(const AccessPath *path);
1745
1746/**
1747 Returns a map of all tables read when `path` or any of its children are
1748 executed. Only iterators that are part of the same query block as `path`
1749 are considered.
1750
1751 If a table is read that doesn't have a map, specifically the temporary
1752 tables made as part of materialization within the same query block,
1753 RAND_TABLE_BIT will be set as a convention and none of that access path's
1754 children will be included in the map. In this case, the caller will need to
1755 manually go in and find said access path, to ask it for its TABLE object.
1756
1757 If include_pruned_tables = true, tables that are hidden under a ZERO_ROWS
1758 access path (ie., pruned away due to impossible join conditions) will be
1759 included in the map. This is normally what you want, as those tables need to
1760 be included whenever you store NULL flags and the likes, but if you don't
1761 want them (perhaps to specifically check for conditions referring to pruned
1762 tables), you can set it to false.
1763 */
1764table_map GetUsedTableMap(const AccessPath *path, bool include_pruned_tables);
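// Example (sketch):
//
//   table_map read_tables =
//       GetUsedTableMap(root_path, /*include_pruned_tables=*/true);
//   if (Overlaps(read_tables, RAND_TABLE_BIT)) {
//     // Some table had no map (e.g. a temporary table); find it manually.
//   }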
1765
1766/**
1767 Find the list of all tables used by this root, stopping at materializations.
1768 Used for knowing which tables to sort.
1769 */
1770Mem_root_array<TABLE *> CollectTables(THD *thd, AccessPath *root_path);
1771
1772/**
1773 For each access path in the (sub)tree rooted at “path”, expand any use of
1774 “filter_predicates” into newly-inserted FILTER access paths, using the given
1775 predicate list. This is used after finding an optimal set of access paths,
1776 to normalize the tree so that the remaining consumers do not need to worry
1777 about filter_predicates and cost_before_filter.
1778
1779 “join” is the join that “path” is part of.
1780 */
1781void ExpandFilterAccessPaths(THD *thd, AccessPath *path, const JOIN *join,
1782 const Mem_root_array<Predicate> &predicates,
1783 unsigned num_where_predicates);
1784
1785/**
1786 Extracts the Item expression from the given “filter_predicates” corresponding
1787 to the given “mask”.
1788 */
1789Item *ConditionFromFilterPredicates(
1790 const Mem_root_array<Predicate> &predicates, OverflowBitset mask,
1791 int num_where_predicates);
1792
1793/// Like ExpandFilterAccessPaths(), but expands only the single access path
1794/// at “path”.
1795void ExpandSingleFilterAccessPath(THD *thd, AccessPath *path, const JOIN *join,
1796 const Mem_root_array<Predicate> &predicates,
1797 unsigned num_where_predicates);
1798
1799/// Returns the tables that are part of a hash join.
1800table_map GetHashJoinTables(AccessPath *path);
1801
1802#endif // SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
AccessPath * NewStreamingAccessPath(THD *thd, AccessPath *child, JOIN *join, Temp_table_param *temp_table_param, TABLE *table, int ref_slice)
Definition: access_path.h:1512
AccessPath * NewRemoveDuplicatesAccessPath(THD *thd, AccessPath *child, Item **group_items, int group_items_size)
Definition: access_path.h:1652
AccessPath * NewInvalidatorAccessPath(THD *thd, AccessPath *child, const char *name)
Definition: access_path.h:1686
AccessPath * NewRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool reverse, bool count_examined_rows)
Definition: access_path.h:1261
AccessPath * NewDeleteRowsAccessPath(THD *thd, AccessPath *child, table_map delete_tables, table_map immediate_tables)
Definition: access_path.cc:101
bool FinalizeMaterializedSubqueries(THD *thd, JOIN *join, AccessPath *path)
If the path is a FILTER path marked that subqueries are to be materialized, do so.
Definition: access_path.cc:308
void CopyBasicProperties(const AccessPath &from, AccessPath *to)
Definition: access_path.h:1227
void ExpandSingleFilterAccessPath(THD *thd, AccessPath *path, const JOIN *join, const Mem_root_array< Predicate > &predicates, unsigned num_where_predicates)
Like ExpandFilterAccessPaths(), but expands only the single access path at “path”.
Definition: access_path.cc:1352
AccessPath * NewFullTextSearchAccessPath(THD *thd, TABLE *table, Index_lookup *ref, Item_func_match *ft_func, bool use_order, bool use_limit, bool count_examined_rows)
Definition: access_path.h:1310
AccessPath * NewTemptableAggregateAccessPath(THD *thd, AccessPath *subquery_path, Temp_table_param *temp_table_param, TABLE *table, AccessPath *table_path, int ref_slice)
Definition: access_path.h:1437
table_map GetHashJoinTables(AccessPath *path)
Returns the tables that are part of a hash join.
Definition: access_path.cc:1474
AccessPath * NewDynamicIndexRangeScanAccessPath(THD *thd, TABLE *table, QEP_TAB *qep_tab, bool count_examined_rows)
Definition: access_path.h:1364
AccessPath * NewZeroRowsAggregatedAccessPath(THD *thd, const char *cause)
Definition: access_path.h:1501
AccessPath * NewAlternativeAccessPath(THD *thd, AccessPath *child, AccessPath *table_scan_path, Index_lookup *used_ref)
Definition: access_path.h:1675
AccessPath * NewUpdateRowsAccessPath(THD *thd, AccessPath *child, table_map delete_tables, table_map immediate_tables)
Definition: access_path.cc:113
AccessPath * NewMRRAccessPath(THD *thd, TABLE *table, Index_lookup *ref, int mrr_flags)
Definition: access_path.h:1341
table_map GetUsedTableMap(const AccessPath *path, bool include_pruned_tables)
Returns a map of all tables read when path or any of its children are executed.
Definition: access_path.cc:215
AccessPath * NewAppendAccessPath(THD *thd, Mem_root_array< AppendPathParameters > *children)
Definition: access_path.h:1607
AccessPath * NewNestedLoopSemiJoinWithDuplicateRemovalAccessPath(THD *thd, AccessPath *outer, AccessPath *inner, const TABLE *table, KEY *key, size_t key_len)
Definition: access_path.h:1400
AccessPath * NewRemoveDuplicatesOnIndexAccessPath(THD *thd, AccessPath *child, TABLE *table, KEY *key, unsigned loosescan_key_len)
Definition: access_path.h:1663
AccessPath * NewFakeSingleRowAccessPath(THD *thd, bool count_examined_rows)
Definition: access_path.h:1470
Item * ConditionFromFilterPredicates(const Mem_root_array< Predicate > &predicates, OverflowBitset mask, int num_where_predicates)
Extracts the Item expression from the given “filter_predicates” corresponding to the given “mask”.
Definition: access_path.cc:1341
AccessPath * NewFilterAccessPath(THD *thd, AccessPath *child, Item *condition)
Definition: access_path.h:1413
constexpr double kUnknownRowCount
To indicate that a row estimate is not yet made.
Definition: access_path.h:169
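Since estimates start out as kUnknownRowCount, consumers should check before doing arithmetic on them. A sketch (fallback_estimate is hypothetical):

    if (path->num_output_rows() == kUnknownRowCount) {
      path->set_num_output_rows(fallback_estimate);
    }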
AccessPath * NewSortAccessPath(THD *thd, AccessPath *child, Filesort *filesort, ORDER *order, bool count_examined_rows)
Definition: access_path.cc:66
AccessPath * NewMaterializedTableFunctionAccessPath(THD *thd, TABLE *table, Table_function *table_function, AccessPath *table_path)
Definition: access_path.h:1374
AccessPath * NewMaterializeAccessPath(THD *thd, Mem_root_array< MaterializePathParameters::QueryBlock > query_blocks, Mem_root_array< const AccessPath * > *invalidators, TABLE *table, AccessPath *table_path, Common_table_expr *cte, Query_expression *unit, int ref_slice, bool rematerialize, ha_rows limit_rows, bool reject_multiple_rows)
Definition: access_path.h:1544
Mem_root_array< TABLE * > CollectTables(THD *thd, AccessPath *root_path)
Find the list of all tables used by this root, stopping at materializations.
Definition: access_path.cc:246
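A sketch of consuming the result; Mem_root_array supports range-for:

    for (TABLE *table : CollectTables(thd, root_path)) {
      // Inspect each table reachable from root_path, stopping at
      // materializations as described above.
    }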
AccessPath * NewTableScanAccessPath(THD *thd, TABLE *table, bool count_examined_rows)
Definition: access_path.h:1239
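The simplest factory; a sketch of creating a full table scan that contributes to the examined-rows statistics:

    AccessPath *scan =
        NewTableScanAccessPath(thd, table, /*count_examined_rows=*/true);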
void ExpandFilterAccessPaths(THD *thd, AccessPath *path, const JOIN *join, const Mem_root_array< Predicate > &predicates, unsigned num_where_predicates)
For each access path in the (sub)tree rooted at “path”, expand any use of “filter_predicates” into new FILTER access paths.
Definition: access_path.cc:1462
AccessPath * NewMaterializeInformationSchemaTableAccessPath(THD *thd, AccessPath *table_path, Table_ref *table_list, Item *condition)
Definition: access_path.h:1595
void FindTablesToGetRowidFor(AccessPath *path)
Modifies "path" and the paths below it so that they provide row IDs for all tables.
Definition: access_path.cc:1197
Mem_root_array< MaterializePathParameters::QueryBlock > SingleMaterializeQueryBlock(THD *thd, AccessPath *path, int select_number, JOIN *join, bool copy_items, Temp_table_param *temp_table_param)
Definition: access_path.h:1529
AccessPath * NewAggregateAccessPath(THD *thd, AccessPath *child, bool rollup)
Definition: access_path.h:1428
TABLE * GetBasicTable(const AccessPath *path)
Return the TABLE* referred to from 'path' if it is a basic access path; otherwise return nullptr.
Definition: access_path.cc:162
AccessPath * NewRefOrNullAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool count_examined_rows)
Definition: access_path.h:1274
AccessPath * NewLimitOffsetAccessPath(THD *thd, AccessPath *child, ha_rows limit, ha_rows offset, bool count_all_rows, bool reject_multiple_rows, ha_rows *send_records_override)
Definition: access_path.h:1450
AccessPath * NewEQRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool count_examined_rows)
Definition: access_path.h:1286
AccessPath * NewWindowAccessPath(THD *thd, AccessPath *child, Window *window, Temp_table_param *temp_table_param, int ref_slice, bool needs_buffering)
Definition: access_path.h:1626
AccessPath * NewFollowTailAccessPath(THD *thd, TABLE *table, bool count_examined_rows)
Definition: access_path.h:1355
AccessPath * NewTableValueConstructorAccessPath(THD *thd)
Definition: access_path.h:1391
AccessPath * NewConstTableAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool count_examined_rows)
Definition: access_path.h:1326
unique_ptr_destroy_only< RowIterator > CreateIteratorFromAccessPath(THD *thd, MEM_ROOT *mem_root, AccessPath *path, JOIN *join, bool eligible_for_batch_mode)
Definition: access_path.cc:378
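This is the bridge from planning to execution: it instantiates the iterator tree corresponding to a finished access path tree. A hedged execution sketch, assuming the usual RowIterator contract (Init() returns true on error; Read() returns 0 for a row, -1 for EOF, 1 for error):

    unique_ptr_destroy_only<RowIterator> iterator =
        CreateIteratorFromAccessPath(thd, thd->mem_root, path, join,
                                     /*eligible_for_batch_mode=*/true);
    if (iterator == nullptr || iterator->Init()) return true;  // error
    while (iterator->Read() == 0) {
      // Each successful Read() leaves the current row in the tables'
      // record buffers.
    }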
AccessPath * NewWeedoutAccessPath(THD *thd, AccessPath *child, SJ_TMP_TABLE *weedout_table)
Definition: access_path.h:1641
AccessPath * NewPushedJoinRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool is_unique, bool count_examined_rows)
Definition: access_path.h:1296
AccessPath * NewZeroRowsAccessPath(THD *thd, AccessPath *child, const char *cause)
Definition: access_path.h:1482
AccessPath * NewUnqualifiedCountAccessPath(THD *thd)
Definition: access_path.h:1385
AccessPath * NewIndexScanAccessPath(THD *thd, TABLE *table, int idx, bool use_order, bool reverse, bool count_examined_rows)
Definition: access_path.h:1248
A wrapper class which provides array bounds checking.
Definition: sql_array.h:47
After parsing, a Common Table Expression is accessed through a Table_ref.
Definition: table.h:4329
API for getting cost estimates for server operations that are not directly related to a table object.
Definition: opt_costmodel.h:52
Sorting related info.
Definition: filesort.h:52
Definition: item_func.h:3396
Base class that is used to represent any kind of expression in a relational query.
Definition: item.h:853
Definition: sql_optimizer.h:133
Definition: key.h:113
A typesafe replacement for DYNAMIC_ARRAY.
Definition: mem_root_array.h:426
Definition: overflow_bitset.h:77
Definition: sql_executor.h:260
Definition: range_optimizer.h:69
This class represents a query expression (one query block or several query blocks combined with UNION).
Definition: sql_lex.h:623
A context for reading through a single table using a chosen access method: index read,...
Definition: row_iterator.h:82
Definition: sql_executor.h:104
For each client connection we create a separate thread with THD serving as a thread/connection descriptor.
Definition: sql_lexer_thd.h:34
MEM_ROOT * mem_root
Definition: sql_lexer_thd.h:38
Class representing a table function.
Definition: table_function.h:53
Definition: table.h:2791
Object containing parameters used when creating and using temporary tables.
Definition: temp_table_param.h:95
Represents the (explicit) window of a SQL 2003 section 7.11 <window clause>, or the implicit (inlined) window of a window function call.
Definition: window.h:105
static MEM_ROOT mem_root
Definition: client_plugin.cc:110
void EstimateLimitOffsetCost(AccessPath *path)
Estimate the costs and row count for a LIMIT_OFFSET AccessPath.
Definition: cost_model.cc:887
bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator, table_map tables_to_get_rowid_for, ha_rows num_rows_estimate, Filesort_info *fs_info, Sort_result *sort_result, ha_rows *found_rows)
Sort a table.
Definition: filesort.cc:367
void SetCostOnTableAccessPath(const Cost_model_server &cost_model, const POSITION *pos, bool is_after_filter, AccessPath *path)
Definition: sql_executor.cc:1913
std::bitset< kMaxSupportedFDs > FunctionalDependencySet
Definition: interesting_orders_defs.h:63
JoinType
Definition: join_type.h:28
static mi_bit_type mask[]
Definition: mi_packrec.cc:141
std::unique_ptr< T, Destroy_only< T > > unique_ptr_destroy_only
std::unique_ptr, but only destroying.
Definition: my_alloc.h:489
my_off_t ha_rows
Definition: my_base.h:1140
#define HA_POS_ERROR
Definition: my_base.h:1142
uint64_t table_map
Definition: my_table_map.h:30
static char * path
Definition: mysqldump.cc:137
PT & ref(PT *tp)
Definition: tablespace_impl.cc:359
uint64_t NodeMap
Since our graphs can never have more than 61 tables, node sets and edge lists are implemented using 64-bit bit sets.
Definition: node_map.h:40
RangeReverse< Range > reverse(Range &x)
Iterate over a range in reverse.
Definition: utilities.h:132
std::string join(Container cont, const std::string &delim)
join elements of a container into a string separated by a delimiter.
Definition: string.h:151
int delete_tables(PFS_engine_table_share_proxy **, unsigned int) noexcept
Definition: pfs_plugin_table_v1_all_empty.cc:39
OverflowBitset is a fixed-size (once allocated) bitmap that is optimized for the common case of few elements.
required string key
Definition: replication_asynchronous_connection_failover.proto:60
case opt name
Definition: sslopt-case.h:33
Access paths are a query planning structure that correspond 1:1 to iterators, in that an access path contains pretty much exactly the information needed to instantiate an iterator.
Definition: access_path.h:193
struct AccessPath::@56::@92 append
struct AccessPath::@56::@76 table_value_constructor
auto & weedout()
Definition: access_path.h:790
struct AccessPath::@56::@83 nested_loop_semijoin_with_duplicate_removal
auto & filter()
Definition: access_path.h:710
bool count_all_rows
Definition: access_path.h:1136
AccessPath * bka_path
Definition: access_path.h:906
struct AccessPath::@56::@75 unqualified_count
struct AccessPath::@56::@68 index_merge
auto & ref_or_null()
Definition: access_path.h:518
auto & materialized_table_function()
Definition: access_path.h:630
AccessPath * cpk_child
Definition: access_path.h:967
auto & rowid_union()
Definition: access_path.h:598
double cost_before_filter
Definition: access_path.h:396
hypergraph::NodeMap parameter_tables
If nonzero, a bitmap of other tables whose joined-in rows must already be loaded when rows from this access path are evaluated.
Definition: access_path.h:482
const auto & bka_join() const
Definition: access_path.h:690
TABLE * temp_table
Definition: access_path.h:1173
OverflowBitset equijoin_predicates
Definition: access_path.h:1077
Item ** group_items
Definition: access_path.h:1185
struct AccessPath::@56::@62 pushed_join_ref
double rescan_cost() const
Return the cost of scanning the given path for the second time (or later) in the given query block.
Definition: access_path.h:391
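Reading this together with the cost members documented here, the accessor subtracts the once-per-query-block part of initialization:

    // rescan_cost() == cost - init_once_cost
    // Only work that must be redone on every scan is charged again.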
bool rollup
Definition: access_path.h:1123
auto & materialize()
Definition: access_path.h:758
const auto & temptable_aggregate() const
Definition: access_path.h:738
OverflowBitset & subsumed_sargable_join_predicates()
Similar to applied_sargable_join_predicates, bitmap of sargable join predicates that have been applied and will subsume the join predicate entirely.
Definition: access_path.h:454
auto & index_scan()
Definition: access_path.h:502
const auto & delete_rows() const
Definition: access_path.h:834
const auto & filter() const
Definition: access_path.h:714
auto & delete_rows()
Definition: access_path.h:830
bool reuse_handler
Definition: access_path.h:944
struct AccessPath::@56::@86 aggregate
KEY_PART * used_key_part
Definition: access_path.h:918
OverflowBitset & applied_sargable_join_predicates()
Bitmap of sargable join predicates that have already been applied in this access path by means of an index lookup (ref access).
Definition: access_path.h:434
Item * condition
Definition: access_path.h:1095
const auto & index_merge() const
Definition: access_path.h:586
const auto & update_rows() const
Definition: access_path.h:842
enum AccessPath::Type type
auto & alternative()
Definition: access_path.h:814
struct AccessPath::@56::@84 filter
auto & pushed_join_ref()
Definition: access_path.h:534
AccessPath * outer
Definition: access_path.h:1051
union AccessPath::@56 u
auto & index_skip_scan()
Definition: access_path.h:606
bool rewrite_semi_to_inner
Definition: access_path.h:1055
const auto & zero_rows_aggregated() const
Definition: access_path.h:674
auto & group_index_skip_scan()
Definition: access_path.h:614
struct AccessPath::@56::@80 hash_join
struct AccessPath::@56::@82 nested_loop_join
const auto & eq_ref() const
Definition: access_path.h:530
int group_items_size
Definition: access_path.h:1186
bool use_order
Definition: access_path.h:868
bool pfs_batch_mode
Definition: access_path.h:1069
auto & temptable_aggregate()
Definition: access_path.h:734
AccessPath()
Definition: access_path.h:199
GroupIndexSkipScanParameters * param
Definition: access_path.h:1011
auto & rowid_intersection()
Definition: access_path.h:590
bool materialize_subqueries
Definition: access_path.h:1105
AccessPath * child
Definition: access_path.h:1039
bool allow_spill_to_disk
Definition: access_path.h:1053
Index_lookup * used_ref
Definition: access_path.h:1199
auto & mrr()
Definition: access_path.h:558
const auto & index_scan() const
Definition: access_path.h:506
const auto & fake_single_row() const
Definition: access_path.h:658
bool reject_multiple_rows
Definition: access_path.h:1137
bool allow_clustered_primary_key_scan
Definition: access_path.h:959
const char * name
Definition: access_path.h:1203
bool use_limit
Definition: access_path.h:896
Window * window
Definition: access_path.h:1172
table_map tables_to_get_rowid_for
Definition: access_path.h:1056
auto & materialize_information_schema_table()
Definition: access_path.h:766
auto & bka_join()
Definition: access_path.h:686
const auto & follow_tail() const
Definition: access_path.h:570
SJ_TMP_TABLE * weedout_table
Definition: access_path.h:1180
Index_lookup * ref
Definition: access_path.h:873
const auto & window() const
Definition: access_path.h:786
const auto & materialized_table_function() const
Definition: access_path.h:634
auto & unqualified_count()
Definition: access_path.h:638
bool keep_current_rowid
Definition: access_path.h:908
struct AccessPath::@56::@79 zero_rows_aggregated
bool using_extended_key_parts
Definition: access_path.h:954
struct AccessPath::@56::@99 delete_rows
auto & nested_loop_join()
Definition: access_path.h:694
const auto & full_text_search() const
Definition: access_path.h:546
struct AccessPath::@56::@65 mrr
bool can_be_used_for_ror
Definition: access_path.h:934
float rec_per_key
Definition: access_path.h:1062
struct AccessPath::@56::@100 update_rows
Safety safe_for_rowid
Whether it is safe to get row IDs (for sorting) from this access path.
Definition: access_path.h:292
JOIN * join
Definition: access_path.h:1144
unsigned index
Definition: access_path.h:930
unsigned mrr_buf_size
Definition: access_path.h:926
struct AccessPath::@56::@74 materialized_table_function
auto & remove_duplicates()
Definition: access_path.h:798
struct AccessPath::@56::@85 sort
RowIterator * iterator
If an iterator has been instantiated for this access path, points to the iterator.
Definition: access_path.h:359
const auto & stream() const
Definition: access_path.h:754
double init_cost
Expected cost to initialize this access path; i.e., cost to read k out of N rows would be init_cost + (k/N) * (cost - init_cost).
Definition: access_path.h:371
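A hypothetical helper (not part of the API) spelling out the linear interpolation from the comment above:

    // Expected cost of reading the first k of the path's n rows.
    double CostToReadFirstKRows(const AccessPath &p, double k, double n) {
      return p.init_cost + (p.cost - p.init_cost) * (k / n);
    }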
struct AccessPath::@56::@66 follow_tail
const auto & remove_duplicates() const
Definition: access_path.h:802
auto & window()
Definition: access_path.h:782
bool need_rows_in_rowid_order
Definition: access_path.h:938
Table_ref * table_list
Definition: access_path.h:1164
const auto & unqualified_count() const
Definition: access_path.h:642
void * secondary_engine_data
Auxiliary data used by a secondary storage engine while processing the access path during optimization and execution.
Definition: access_path.h:491
struct AccessPath::@56::@78 zero_rows
void set_num_output_rows(double val)
Definition: access_path.h:849
bool is_covering
Definition: access_path.h:987
const auto & mrr() const
Definition: access_path.h:562
bool remove_duplicates
Definition: access_path.h:1117
table_map tables_to_update
Definition: access_path.h:1212
const auto & table_value_constructor() const
Definition: access_path.h:650
KEY * key
Definition: access_path.h:1089
struct AccessPath::@56::@57 table_scan
auto & table_value_constructor()
Definition: access_path.h:646
auto & stream()
Definition: access_path.h:750
size_t key_len
Definition: access_path.h:1090
const OverflowBitset & subsumed_sargable_join_predicates() const
Definition: access_path.h:457
const auto & zero_rows() const
Definition: access_path.h:666
const auto & append() const
Definition: access_path.h:778
ha_rows offset
Definition: access_path.h:1135
int idx
Definition: access_path.h:867
auto & hash_join()
Definition: access_path.h:678
auto & cache_invalidator()
Definition: access_path.h:822
bool force_sort_rowids
Definition: access_path.h:1119
unsigned num_used_key_parts
Definition: access_path.h:931
unsigned num_ranges
Definition: access_path.h:923
ORDER * order
Definition: access_path.h:1115
auto & sort()
Definition: access_path.h:718
auto & fake_single_row()
Definition: access_path.h:654
ha_rows limit
Definition: access_path.h:1116
struct AccessPath::@56::@73 dynamic_index_range_scan
struct AccessPath::@56::@67 index_range_scan
auto & follow_tail()
Definition: access_path.h:566
double m_num_output_rows
Expected number of output rows, -1.0 for unknown.
Definition: access_path.h:853
const JoinPredicate * join_predicate
Definition: access_path.h:1052
const auto & const_table() const
Definition: access_path.h:554
const auto & dynamic_index_range_scan() const
Definition: access_path.h:626
const auto & table_scan() const
Definition: access_path.h:498
unsigned mrr_flags
Definition: access_path.h:925
int ref_slice
Definition: access_path.h:1130
struct AccessPath::@56::@60 ref_or_null
struct AccessPath::@56::@96 remove_duplicates_on_index
OverflowBitset filter_predicates
Bitmap of WHERE predicates that we are including on this access path, referring to the “predicates” array internal to the join optimizer.
Definition: access_path.h:418
bool can_be_used_for_imerge
Definition: access_path.h:941
bool forced_by_hint
Definition: access_path.h:958
auto & limit_offset()
Definition: access_path.h:742
AccessPath * inner
Definition: access_path.h:1051
bool provide_rowid
Definition: access_path.h:1147
struct AccessPath::@56::@81 bka_join
const auto & index_range_scan() const
Definition: access_path.h:578
const auto & ref() const
Definition: access_path.h:514
bool forced_by_dbug
Whether this access path is forced preferred over all others by means of a SET DEBUG force_subplan_0x... statement.
Definition: access_path.h:303
Item_func_match * ft_func
Definition: access_path.h:897
struct AccessPath::@56::@58 index_scan
ha_rows * send_records_override
Definition: access_path.h:1140
auto & table_scan()
Definition: access_path.h:494
const auto & hash_join() const
Definition: access_path.h:682
QEP_TAB * qep_tab
Definition: access_path.h:1015
Table_function * table_function
Definition: access_path.h:1019
auto & zero_rows_aggregated()
Definition: access_path.h:670
const auto & sort() const
Definition: access_path.h:722
bool unwrap_rollup
Definition: access_path.h:1118
double init_once_cost
Of init_cost, how much of the initialization needs only to be done once per query block.
Definition: access_path.h:383
struct AccessPath::@56::@77 fake_single_row
Type
Definition: access_path.h:208
@ FOLLOW_TAIL
Definition: access_path.h:221
@ FILTER
Definition: access_path.h:245
@ PUSHED_JOIN_REF
Definition: access_path.h:217
@ ZERO_ROWS_AGGREGATED
Definition: access_path.h:234
@ UPDATE_ROWS
Definition: access_path.h:263
@ AGGREGATE
Definition: access_path.h:247
@ BKA_JOIN
Definition: access_path.h:241
@ ZERO_ROWS
Definition: access_path.h:233
@ CONST_TABLE
Definition: access_path.h:219
@ GROUP_INDEX_SKIP_SCAN
Definition: access_path.h:227
@ INDEX_RANGE_SCAN
Definition: access_path.h:222
@ UNQUALIFIED_COUNT
Definition: access_path.h:236
@ EQ_REF
Definition: access_path.h:216
@ FAKE_SINGLE_ROW
Definition: access_path.h:232
@ MATERIALIZE_INFORMATION_SCHEMA_TABLE
Definition: access_path.h:252
@ WINDOW
Definition: access_path.h:254
@ REF_OR_NULL
Definition: access_path.h:215
@ MATERIALIZE
Definition: access_path.h:251
@ NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL
Definition: access_path.h:240
@ ROWID_UNION
Definition: access_path.h:225
@ INDEX_SKIP_SCAN
Definition: access_path.h:226
@ MRR
Definition: access_path.h:220
@ CACHE_INVALIDATOR
Definition: access_path.h:259
@ INDEX_SCAN
Definition: access_path.h:213
@ TABLE_VALUE_CONSTRUCTOR
Definition: access_path.h:231
@ WEEDOUT
Definition: access_path.h:255
@ MATERIALIZED_TABLE_FUNCTION
Definition: access_path.h:235
@ REMOVE_DUPLICATES_ON_INDEX
Definition: access_path.h:257
@ TABLE_SCAN
Definition: access_path.h:212
@ REF
Definition: access_path.h:214
@ TEMPTABLE_AGGREGATE
Definition: access_path.h:248
@ LIMIT_OFFSET
Definition: access_path.h:249
@ APPEND
Definition: access_path.h:253
@ NESTED_LOOP_JOIN
Definition: access_path.h:239
@ INDEX_MERGE
Definition: access_path.h:223
@ FULL_TEXT_SEARCH
Definition: access_path.h:218
@ ALTERNATIVE
Definition: access_path.h:258
@ STREAM
Definition: access_path.h:250
@ REMOVE_DUPLICATES
Definition: access_path.h:256
@ ROWID_INTERSECTION
Definition: access_path.h:224
@ DYNAMIC_INDEX_RANGE_SCAN
Definition: access_path.h:228
@ DELETE_ROWS
Definition: access_path.h:262
@ SORT
Definition: access_path.h:246
@ HASH_JOIN
Definition: access_path.h:242
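A minimal dispatch sketch over the type tag; the union accessor used must match the tag (handle_table and walk are hypothetical callbacks):

    switch (path->type) {
      case AccessPath::TABLE_SCAN:
        handle_table(path->table_scan().table);
        break;
      case AccessPath::HASH_JOIN:
        walk(path->hash_join().outer);
        walk(path->hash_join().inner);
        break;
      default:
        break;
    }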
bool retrieve_full_rows
Definition: access_path.h:970
const auto & rowid_union() const
Definition: access_path.h:602
const TABLE * table
Definition: access_path.h:1088
const auto & index_skip_scan() const
Definition: access_path.h:610
auto & dynamic_index_range_scan()
Definition: access_path.h:622
const auto & pushed_join_ref() const
Definition: access_path.h:538
auto & remove_duplicates_on_index()
Definition: access_path.h:806
table_map immediate_tables
Definition: access_path.h:1208
int mrr_flags
Definition: access_path.h:907
auto & full_text_search()
Definition: access_path.h:542
const auto & materialize() const
Definition: access_path.h:762
int ordering_state
Which ordering the rows produced by this path follow, if any (see interesting_orders.h).
Definition: access_path.h:353
auto & eq_ref()
Definition: access_path.h:526
JoinType join_type
Definition: access_path.h:1060
const auto & limit_offset() const
Definition: access_path.h:746
MaterializePathParameters * param
Definition: access_path.h:1158
const char * cause
Definition: access_path.h:1042
struct AccessPath::@56::@71 index_skip_scan
struct AccessPath::@56::@87 temptable_aggregate
auto & update_rows()
Definition: access_path.h:838
auto & index_merge()
Definition: access_path.h:582
auto & aggregate()
Definition: access_path.h:726
struct AccessPath::@56::@97 alternative
struct AccessPath::@56::@88 limit_offset
struct AccessPath::@56::@90 materialize
struct AccessPath::@56::@98 cache_invalidator
struct AccessPath::@56::@91 materialize_information_schema_table
const auto & group_index_skip_scan() const
Definition: access_path.h:618
auto & zero_rows()
Definition: access_path.h:662
AccessPath * subquery_path
Definition: access_path.h:1126
Mem_root_array< AppendPathParameters > * children
Definition: access_path.h:1168
struct AccessPath::@56::@61 eq_ref
double cost
Expected cost to read all of this access path once; -1.0 for unknown.
Definition: access_path.h:362
struct AccessPath::@56::@63 full_text_search
const auto & nested_loop_join() const
Definition: access_path.h:698
bool already_expanded_predicates
Definition: access_path.h:1070
struct AccessPath::@56::@72 group_index_skip_scan
const auto & remove_duplicates_on_index() const
Definition: access_path.h:810
Temp_table_param * temp_table_param
Definition: access_path.h:1127
auto & nested_loop_semijoin_with_duplicate_removal()
Definition: access_path.h:702
struct AccessPath::@56::@94 weedout
bool reverse
Definition: access_path.h:869
AccessPath * table_scan_path
Definition: access_path.h:1195
const auto & materialize_information_schema_table() const
Definition: access_path.h:770
double subquery_cost
The total cost of executing the queries that we materialize.
Definition: access_path.h:1160
bool geometry
Definition: access_path.h:947
bool count_examined_rows
Whether this access path counts as one that scans a base table, and thus should be counted towards examined_rows.
Definition: access_path.h:298
unsigned loosescan_key_len
Definition: access_path.h:1192
auto & ref()
Definition: access_path.h:510
struct AccessPath::@56::@64 const_table
table_map tables_to_delete_from
Definition: access_path.h:1207
IndexSkipScanParameters * param
Definition: access_path.h:1002
const OverflowBitset & applied_sargable_join_predicates() const
Definition: access_path.h:437
Mem_root_array< AccessPath * > * children
Definition: access_path.h:960
OverflowBitset delayed_predicates
Bitmap of WHERE predicates that touch tables we have joined in, but that we could not apply yet (for instance because they reference other tables).
Definition: access_path.h:446
AccessPath * table_path
Definition: access_path.h:1020
const auto & aggregate() const
Definition: access_path.h:730
TABLE * table
Definition: access_path.h:863
auto & append()
Definition: access_path.h:774
const auto & weedout() const
Definition: access_path.h:794
const auto & cache_invalidator() const
Definition: access_path.h:826
bool needs_buffering
Definition: access_path.h:1176
int8_t immediate_update_delete_table
For UPDATE and DELETE statements: The node index of a table which can be updated or deleted from immediately as the rows are read from the iterator.
Definition: access_path.h:347
QUICK_RANGE ** ranges
Definition: access_path.h:922
bool is_unique
Definition: access_path.h:890
struct AccessPath::@56::@70 rowid_union
const auto & alternative() const
Definition: access_path.h:818
Safety
A general enum to describe the safety of a given operation.
Definition: access_path.h:273
@ SAFE_IF_SCANNED_ONCE
The given operation is safe if this access path is scanned once, but not if it's scanned multiple times.
Definition: access_path.h:282
@ UNSAFE
The given operation is unsafe on this access path, no matter how many or few times it's scanned.
Definition: access_path.h:288
@ SAFE
The given operation is always safe on this access path.
Definition: access_path.h:275
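When an operation combines two paths, a conservative merge keeps the most restrictive level; assuming the enumerators are ordered SAFE < SAFE_IF_SCANNED_ONCE < UNSAFE, that is:

    to->safe_for_rowid =
        std::max(outer->safe_for_rowid, inner->safe_for_rowid);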
auto & const_table()
Definition: access_path.h:550
unsigned mrr_length_per_rec
Definition: access_path.h:1061
const auto & rowid_intersection() const
Definition: access_path.h:594
struct AccessPath::@56::@89 stream
auto & index_range_scan()
Definition: access_path.h:574
double num_output_rows() const
Definition: access_path.h:847
double num_output_rows_before_filter
If no filter, identical to num_output_rows, cost, respectively.
Definition: access_path.h:395
const auto & ref_or_null() const
Definition: access_path.h:522
struct AccessPath::@56::@69 rowid_intersection
Filesort * filesort
Definition: access_path.h:1109
bool store_rowids
Definition: access_path.h:1054
const auto & nested_loop_semijoin_with_duplicate_removal() const
Definition: access_path.h:706
Definition: access_path.h:163
JOIN * join
Definition: access_path.h:165
AccessPath * path
Definition: access_path.h:164
Definition: group_index_skip_scan_plan.h:45
Logically a part of AccessPath::index_skip_scan(), but is too large, so split out into its own struct.
Definition: index_skip_scan_plan.h:73
Structure used for index-based lookups.
Definition: sql_opt_exec_shared.h:67
A specification that two specific relational expressions (e.g., two tables, or a table and a join bet...
Definition: access_path.h:76
int semijoin_group_size
Definition: access_path.h:109
FunctionalDependencySet functional_dependencies
Definition: access_path.h:93
Mem_root_array< int > functional_dependencies_idx
Definition: access_path.h:98
RelationalExpression * expr
Definition: access_path.h:77
double selectivity
Definition: access_path.h:78
int ordering_idx_needed_for_semijoin_rewrite
Definition: access_path.h:103
Item ** semijoin_group
Definition: access_path.h:108
size_t estimated_bytes_per_row
Definition: access_path.h:82
Definition: range_optimizer.h:55
The MEM_ROOT is a simple arena, where allocations are carved out of larger blocks.
Definition: my_alloc.h:83
Definition: materialize_path_parameters.h:42
int select_number
Definition: materialize_path_parameters.h:44
bool copy_items
Definition: materialize_path_parameters.h:47
Temp_table_param * temp_table_param
Definition: materialize_path_parameters.h:48
AccessPath * subquery_path
Definition: materialize_path_parameters.h:43
bool disable_deduplication_by_hash_field
Definition: materialize_path_parameters.h:46
JOIN * join
Definition: materialize_path_parameters.h:45
Definition: materialize_path_parameters.h:40
bool rematerialize
True if rematerializing on every Init() call (e.g., because we have a dependency on a value from outside the query block).
Definition: materialize_path_parameters.h:80
Common_table_expr * cte
If materializing a CTE, points to it (see m_cte), otherwise nullptr.
Definition: materialize_path_parameters.h:65
bool reject_multiple_rows
True if this is the top level iterator for a materialized derived table transformed from a scalar subquery which needs a run-time cardinality check.
Definition: materialize_path_parameters.h:99
ha_rows limit_rows
Used when pushing LIMIT down to MaterializeIterator; this is more efficient than having a LimitOffsetIterator above it.
Definition: materialize_path_parameters.h:92
Mem_root_array< QueryBlock > query_blocks
Definition: materialize_path_parameters.h:58
TABLE * table
Handle to table to materialize into.
Definition: materialize_path_parameters.h:62
int ref_slice
Definition: materialize_path_parameters.h:74
Query_expression * unit
The query expression we are materializing.
Definition: materialize_path_parameters.h:68
Mem_root_array< const AccessPath * > * invalidators
Definition: materialize_path_parameters.h:59
Definition: table.h:282
A position of a table within a join order.
Definition: sql_select.h:352
A filter of some sort that is not a join condition (those are stored in JoinPredicate objects).
Definition: access_path.h:117
hypergraph::NodeMap total_eligibility_set
Definition: access_path.h:130
bool was_join_condition
Definition: access_path.h:143
Mem_root_array< int > functional_dependencies_idx
Definition: access_path.h:155
FunctionalDependencySet functional_dependencies
Definition: access_path.h:154
int source_multiple_equality_idx
Definition: access_path.h:151
hypergraph::NodeMap used_nodes
Definition: access_path.h:121
Item * condition
Definition: access_path.h:118
double selectivity
Definition: access_path.h:132
Mem_root_array< ContainedSubquery > contained_subqueries
Definition: access_path.h:160
Represents an expression tree in the relational algebra of joins.
Definition: relational_expression.h:81
Definition: table.h:1399
bool is_union_or_table() const
Test if this tmp table stores the result of a UNION set operation or a single table.
Definition: table.h:1551