MySQL 8.0.32
Source Code Documentation
access_path.h
1/* Copyright (c) 2020, 2022, Oracle and/or its affiliates.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License, version 2.0,
5 as published by the Free Software Foundation.
6
7 This program is also distributed with certain software (including
8 but not limited to OpenSSL) that is licensed under separate terms,
9 as designated in a particular file or component or in included license
10 documentation. The authors of MySQL hereby grant you an additional
11 permission to link the program and your derivative works with the
12 separately licensed software that they have included with MySQL.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License, version 2.0, for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
22
23#ifndef SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
24#define SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
25
26#include <assert.h>
27#include <stdint.h>
28
29#include <string>
30#include <type_traits>
31#include <vector>
32
33#include "my_base.h"
34#include "my_table_map.h"
35#include "sql/join_optimizer/interesting_orders_defs.h"
36#include "sql/join_optimizer/materialize_path_parameters.h"
37#include "sql/join_optimizer/node_map.h"
38#include "sql/join_optimizer/overflow_bitset.h"
39#include "sql/join_type.h"
40#include "sql/mem_root_array.h"
41#include "sql/sql_array.h"
42#include "sql/sql_class.h"
43#include "sql/table.h"
44
45template <class T>
46class Bounds_checked_array;
47class Common_table_expr;
48class Filesort;
49class Item;
50class Item_func_match;
51class JOIN;
52class KEY;
53class QEP_TAB;
54class QUICK_RANGE;
55class SJ_TMP_TABLE;
56class Table_function;
57class Temp_table_param;
58class Window;
59struct AccessPath;
60struct GroupIndexSkipScanParameters;
61struct IndexSkipScanParameters;
62struct Index_lookup;
63struct KEY_PART;
64struct ORDER;
65struct POSITION;
66struct RelationalExpression;
67struct TABLE;
68
69/**
70 A specification that two specific relational expressions
71 (e.g., two tables, or a table and a join between two other tables)
72 should be joined together. The actual join conditions, if any,
73 live inside the “expr” object, as does the join type etc.
74 */
75struct JoinPredicate {
76 RelationalExpression *expr = nullptr;
77 double selectivity;
78
79 // If this join is made using a hash join, estimates the width
80 // of each row as stored in the hash table, in bytes.
81 size_t estimated_bytes_per_row;
82
83 // The set of (additional) functional dependencies that are active
84 // after this join predicate has been applied. E.g. if we're joining
85 // on t1.x = t2.x, there will be a bit for that functional dependency.
86 // We don't currently support more complex join conditions, but there's
87 // no conceptual reason why we couldn't, e.g. a join on a = b + c
88 // could give rise to the FD {b, c} → a and possibly even {a, b} → c
89 // or {a, c} → b.
90 //
91 // Used in the processing of interesting orders.
92 FunctionalDependencySet functional_dependencies;
93
94 // A less compact form of functional_dependencies, used during building
95 // (FunctionalDependencySet bitmaps are only available after all functional
96 // indexes have been collected and Build() has been called).
97 Mem_root_array<int> functional_dependencies_idx;
98
99 // If this is a suitable semijoin: Contains the grouping given by the
100 // join key. If the rows are in this grouping, then the join optimizer will
101 // consider deduplicating on it and inverting the join. -1 otherwise.
102 int ordering_idx_needed_for_semijoin_rewrite = -1;
103
104 // Same as ordering_idx_needed_for_semijoin_rewrite, but given to the
105 // RemoveDuplicatesIterator for doing the actual grouping. Allocated
106 // on the MEM_ROOT. Can be empty, in which case a LIMIT 1 would do.
107 Item **semijoin_group = nullptr;
108 int semijoin_group_size = 0;
109};
110
111/**
112 A filter of some sort that is not a join condition (those are stored
113 in JoinPredicate objects). AND conditions are typically split up into
114 multiple Predicates.
115 */
116struct Predicate {
117 Item *condition;
118
119 // condition->used_tables(), converted to a NodeMap.
120 hypergraph::NodeMap used_nodes;
121
122 // tables referred to by the condition, plus any tables whose values
123 // can null any of those tables. (Even when reordering outer joins,
124 // at least one of those tables will still be present on the
125 // left-hand side of the outer join, so this is sufficient.)
126 //
127 // As a special case, we allow setting RAND_TABLE_BIT, even though it
128 // is normally part of a table_map, not a NodeMap.
129 hypergraph::NodeMap total_eligibility_set;
130
131 double selectivity;
132
133 // Whether this predicate is a join condition after all; it was promoted
134 // to a WHERE predicate since it was part of a cycle (see the comment in
135 // AddCycleEdges()). If it is, it is usually ignored so that we don't
136 // double-apply join conditions -- but if the join in question was not
137 // applied (because the cycle was broken at this point), the predicate
138 // would come into play. This is normally registered on the join itself
139 // (see RelationalExpression::join_predicate_bitmap), but having the bit
140 // on the predicate itself is used to avoid trying to push it down as a
141 // sargable predicate.
142 bool was_join_condition = false;
143
144 // If this is a join condition that came from a multiple equality,
145 // and we have decided to create a mesh from that multiple equality,
146 // returns the index of it into the “multiple_equalities” array
147 // in MakeJoinHypergraph(). (You don't actually need the array to
148 // use this; it's just an opaque index to deduplicate between different
149 // predicates.) Otherwise, -1.
150 int source_multiple_equality_idx = -1;
151
152 // See the equivalent fields in JoinPredicate.
153 FunctionalDependencySet functional_dependencies;
154 Mem_root_array<int> functional_dependencies_idx;
155
156 // The list of all subqueries referred to in this predicate, if any.
157 // The optimizer uses this to add their materialized/non-materialized
158 // costs when evaluating filters.
159 Mem_root_array<ContainedSubquery> contained_subqueries;
160};
161
162struct AppendPathParameters {
163 AccessPath *path;
164 JOIN *join;
165};
166
167/**
168 Access paths are a query planning structure that corresponds 1:1 to iterators,
169 in that an access path contains pretty much exactly the information
170 needed to instantiate a given iterator, plus some information that is only
171 needed during planning, such as costs. (The new join optimizer will extend
172 this somewhat in the future. Some iterators also need the query block,
173 ie., JOIN object, they are part of, but that is implicitly available when
174 constructing the tree.)
175
176 AccessPath objects build on a variant, ie., they can hold an access path of
177 any type (table scan, filter, hash join, sort, etc.), although only one at the
178 same time. Currently, they contain 32 bytes of base information that is common
179 to any access path (type identifier, costs, etc.), and then up to 40 bytes
180 that is type-specific (e.g. for a table scan, the TABLE object). It would be
181 nice if we could squeeze it down to 64 and fit a cache line exactly, but it
182 does not seem to be easy without fairly large contortions.
183
184 We could have solved this by inheritance, but the fixed-size design makes it
185 possible to replace an access path when a better one is found, without
186 introducing a new allocation, which will be important when using them as a
187 planning structure.
188 */
189struct AccessPath {
190 // Most of the members are declared with initializers, but bit fields cannot
191 // be declared with initializers until C++20, so we need to define a
192 // constructor to initialize those fields.
193 // TODO(khatlen): When we move to C++20, add initializers to the bit fields
194 // too, and use the default constructor generated by the compiler.
195 AccessPath()
196 : count_examined_rows(false)
197#ifndef NDEBUG
198 ,
199 forced_by_dbug(false)
200#endif
201 {
202 }
203
204 enum Type : uint8_t {
205 // Basic access paths (those with no children, at least nominally).
206 // NOTE: When adding more paths to this section, also update GetBasicTable()
207 // to handle them.
208 TABLE_SCAN,
209 INDEX_SCAN,
210 REF,
211 REF_OR_NULL,
212 EQ_REF,
213 PUSHED_JOIN_REF,
214 FULL_TEXT_SEARCH,
215 CONST_TABLE,
216 MRR,
217 FOLLOW_TAIL,
218 INDEX_RANGE_SCAN,
219 INDEX_MERGE,
220 ROWID_INTERSECTION,
221 ROWID_UNION,
222 INDEX_SKIP_SCAN,
223 GROUP_INDEX_SKIP_SCAN,
224 DYNAMIC_INDEX_RANGE_SCAN,
225
226 // Basic access paths that don't correspond to a specific table.
227 TABLE_VALUE_CONSTRUCTOR,
228 FAKE_SINGLE_ROW,
229 ZERO_ROWS,
230 ZERO_ROWS_AGGREGATED,
231 MATERIALIZED_TABLE_FUNCTION,
232 UNQUALIFIED_COUNT,
233
234 // Joins.
235 NESTED_LOOP_JOIN,
236 NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL,
237 BKA_JOIN,
238 HASH_JOIN,
239
240 // Composite access paths.
241 FILTER,
242 SORT,
243 AGGREGATE,
244 TEMPTABLE_AGGREGATE,
245 LIMIT_OFFSET,
246 STREAM,
247 MATERIALIZE,
248 MATERIALIZE_INFORMATION_SCHEMA_TABLE,
249 APPEND,
250 WINDOW,
251 WEEDOUT,
252 REMOVE_DUPLICATES,
253 REMOVE_DUPLICATES_ON_INDEX,
254 ALTERNATIVE,
255 CACHE_INVALIDATOR,
256
257 // Access paths that modify tables.
258 DELETE_ROWS,
259 UPDATE_ROWS,
260 } type;
261
262 /// A general enum to describe the safety of a given operation.
263 /// Currently we only use this to describe row IDs, but it can easily
264 /// be reused for safety of updating a table we're reading from
265 /// (the Halloween problem), or just generally unreproducible results
266 /// (e.g. a TABLESAMPLE changing due to external factors).
267 ///
268 /// Less safe values have higher numerical values.
269 enum Safety : uint8_t {
270 /// The given operation is always safe on this access path.
271 SAFE = 0,
272
273 /// The given operation is safe if this access path is scanned once,
274 /// but not if it's scanned multiple times (e.g. used on the inner side
275 /// of a nested-loop join). A typical example of this is a derived table
276 /// or CTE that is rematerialized on each scan, so that references to
277 /// the old values (such as row IDs) are no longer valid.
278 SAFE_IF_SCANNED_ONCE = 1,
279
280 /// The given operation is unsafe on this access path, no matter how many
281 /// or few times it's scanned. Often, it may help to materialize it
282 /// (assuming the materialization itself doesn't use the operation
283 /// in question).
284 UNSAFE = 2
285 };
286
287 /// Whether it is safe to get row IDs (for sorting) from this access path.
289 Safety safe_for_rowid = SAFE;
290 /// Whether this access path counts as one that scans a base table,
291 /// and thus should be counted towards examined_rows. It can sometimes
292 /// seem a bit arbitrary which iterators count towards examined_rows
293 /// and which ones do not, so the only canonical reference is the tests.
294 bool count_examined_rows : 1;
295
296#ifndef NDEBUG
297 /// Whether this access path is forced preferred over all others by means
298 /// of a SET DEBUG force_subplan_0x... statement.
299 bool forced_by_dbug : 1;
300#endif
301
302 /// For UPDATE and DELETE statements: The node index of a table which can be
303 /// updated or deleted from immediately as the rows are read from the
304 /// iterator, if this path is only read from once. -1 if there is no such
305 /// table in this path.
306 ///
307 /// Note that this is an index into CostingReceiver's array of nodes, and is
308 /// not necessarily equal to the table number within the query block given by
309 /// Table_ref::tableno().
310 ///
311 /// The table, if any, is currently always the outermost table in the path.
312 ///
313 /// It is possible to have plans where it would be safe to operate
314 /// "immediately" on more than one table. For example, if we do a merge join,
315 /// it is safe to perform immediate deletes on tables on the inner side of the
316 /// join, since both sides are read only once. (However, we currently do not
317 /// support merge joins.)
318 ///
319 /// Another possibility is when the outer table of a nested loop join is
320 /// guaranteed to return at most one row (typically, a unique index lookup
321 /// aka. eq_ref). Then it's safe to delete immediately from both sides of the
322 /// nested loop join. But we don't do this yet.
323 ///
324 /// Hash joins read both sides exactly once. However, with hash joins, the
325 /// scans on the inner tables are not positioned on the correct row when the
326 /// result of the join is returned, so the immediate delete logic will need to
327 /// be changed to reposition the underlying scans before doing the immediate
328 /// deletes. While this can be done, it makes the benefit of immediate deletes
329 /// less obvious for these tables, and it can also be a loss in some cases,
330 /// because we lose the deduplication provided by the Unique object used for
331 /// buffered deletes (the immediate deletes could end up spending time
332 /// repositioning to already deleted rows). So we currently don't attempt to
333 /// do immediate deletes from inner tables of hash joins either.
334 ///
335 /// The outer table of a hash join can be deleted from immediately if the
336 /// inner table fits in memory. If the hash join spills to disk, though,
337 /// neither the rows of the outer table nor the rows of the inner table come
338 /// out in the order of the underlying scan, so it is not safe in general to
339 /// perform immediate deletes on the outer table of a hash join.
340 ///
341 /// If support for immediate operations on multiple tables is added,
342 /// this member could be changed from a node index to a NodeMap.
344
345 /// Which ordering the rows produced by this path follow, if any
346 /// (see interesting_orders.h). This is really a LogicalOrderings::StateIndex,
347 /// but we don't want to add a dependency on interesting_orders.h from
348 /// this file, so we use the base type instead of the typedef here.
349 int ordering_state = 0;
350
351 /// If an iterator has been instantiated for this access path, points to the
352 /// iterator. Used for constructing iterators that need to talk to each other
353 /// (e.g. for recursive CTEs, or BKA join), and also for locating timing
354 /// information in EXPLAIN ANALYZE queries.
355 RowIterator *iterator = nullptr;
356
357 /// Expected cost to read all of this access path once; -1.0 for unknown.
358 double cost{-1.0};
359
360 /// Expected cost to initialize this access path; ie., cost to read
361 /// k out of N rows would be init_cost + (k/N) * (cost - init_cost).
362 /// Note that EXPLAIN prints out cost of reading the _first_ row
363 /// because it is easier for the user and also easier to measure in
364 /// EXPLAIN ANALYZE, but it is easier to do calculations with a pure
365 /// initialization cost, so that is what we use in this member.
366 /// -1.0 for unknown.
367 double init_cost{-1.0};
368
369 /// Of init_cost, how much of the initialization needs only to be done
370 /// once per query block. (This is a cost, not a proportion.)
371 /// Ie., if the access path can reuse some of its initialization work
372 /// if Init() is called multiple times, this member will be nonzero.
373 /// A typical example is a materialized table with rematerialize=false;
374 /// the second time Init() is called, it's a no-op. Most paths will have
375 /// init_once_cost = 0.0, ie., repeated scans will cost the same.
376 /// We do not intend to use this field to model cache effects.
377 ///
378 /// This is currently not printed in EXPLAIN, only optimizer trace.
379 double init_once_cost{0.0};
380
381 /// Return the cost of scanning the given path for the second time
382 /// (or later) in the given query block. This is really the interesting
383 /// metric, not init_once_cost in itself, but since nearly all paths
384 /// have zero init_once_cost, storing that instead allows us to skip
385 /// a lot of repeated path->init_once_cost = path->init_cost calls
386 /// in the code.
387 double rescan_cost() const { return cost - init_once_cost; }
388
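// A worked example of the formulas above, with purely illustrative numbers:
// if cost = 110.0, init_cost = 10.0 and init_once_cost = 4.0, then reading
// k = N/2 rows once is expected to cost init_cost + 0.5 * (cost - init_cost)
// = 10.0 + 0.5 * 100.0 = 60.0, while the second and later scans cost
// rescan_cost() = cost - init_once_cost = 110.0 - 4.0 = 106.0.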
389 /// If no filter, identical to num_output_rows, cost, respectively.
390 /// init_cost is always the same (filters have zero initialization cost).
391 double num_output_rows_before_filter{-1.0}, cost_before_filter{-1.0};
392
393 /// Bitmap of WHERE predicates that we are including on this access path,
394 /// referring to the “predicates” array internal to the join optimizer.
395 /// Since bit masks are much cheaper to deal with than creating Item
396 /// objects, and we don't invent new conditions during join optimization
397 /// (all of them are known when we begin optimization), we stick to
398 /// manipulating bit masks during optimization, saying which filters will be
399 /// applied at this node (a 1-bit means the filter will be applied here; if
400 /// there are multiple ones, they are ANDed together).
401 ///
402 /// This is used during join optimization only; before iterators are
403 /// created, we will add FILTER access paths to represent these instead,
404 /// removing the dependency on the array. Said FILTER paths are by
405 /// convention created with materialize_subqueries = false, since the by far
406 /// most common case is that there are no subqueries in the predicate.
407 /// In other words, if you wish to represent a filter with
408 /// materialize_subqueries = true, you will need to make an explicit FILTER
409 /// node.
410 ///
411 /// See also nested_loop_join().equijoin_predicates, which is for filters
412 /// being applied _before_ nested-loop joins, but is otherwise the same idea.
413 OverflowBitset filter_predicates{0};
414
415 /// Bitmap of sargable join predicates that have already been applied
416 /// in this access path by means of an index lookup (ref access),
417 /// again referring to “predicates”, and thus should not be counted again
418 /// for selectivity. Note that the filter may need to be applied
419 /// nevertheless (especially in case of type conversions); see
420 /// subsumed_sargable_join_predicates.
421 ///
422 /// Since these refer to the same array as filter_predicates, they will
423 /// never overlap with filter_predicates, and so we can reuse the same
424 /// memory using an alias (a union would not be allowed, since OverflowBitset
425 /// is a class with non-trivial default constructor), even though the meaning
426 /// is entirely separate. If N = num_where_predicates in the hypergraph, then
427 /// bits 0..(N-1) belong to filter_predicates, and the rest to
428 /// applied_sargable_join_predicates.
429 OverflowBitset &applied_sargable_join_predicates() {
430 return filter_predicates;
431 }
432 const OverflowBitset &applied_sargable_join_predicates() const {
433 return filter_predicates;
434 }
435
436 /// Bitmap of WHERE predicates that touch tables we have joined in,
437 /// but that we could not apply yet (for instance because they reference
438 /// other tables, or because we could not push them down into
439 /// the nullable side of outer joins). Used during planning only
440 /// (see filter_predicates).
441 OverflowBitset delayed_predicates{0};
442
443 /// Similar to applied_sargable_join_predicates, bitmap of sargable
444 /// join predicates that have been applied and will subsume the join
445 /// predicate entirely, ie., not only should the selectivity not be
446 /// double-counted, but the predicate itself is redundant and need not
447 /// be applied as a filter. (It is an error to have a bit set here but not
448 /// in applied_sargable_join_predicates.)
449 OverflowBitset &subsumed_sargable_join_predicates() {
450 return delayed_predicates;
451 }
452 const OverflowBitset &subsumed_sargable_join_predicates() const {
453 return delayed_predicates;
454 }
455
456 /// If nonzero, a bitmap of other tables whose joined-in rows must already be
457 /// loaded when rows from this access path are evaluated; that is, this
458 /// access path must be put on the inner side of a nested-loop join (or
459 /// multiple such joins) where the outer side includes all of the given
460 /// tables.
461 ///
462 /// The most obvious case for this is dependent tables in LATERAL, but a more
463 /// common case is when we have pushed join conditions referring to those
464 /// tables; e.g., if this access path represents t1 and we have a condition
465 /// t1.x=t2.x that is pushed down into an index lookup (ref access), t2 will
466 /// be set in this bitmap. We can still join in other tables, deferring t2,
467 /// but the bit(s) will then propagate, and we cannot be on the right side of
468 /// a hash join until parameter_tables is zero again. (Also see
469 /// DisallowParameterizedJoinPath() for when we disallow such deferring,
470 /// as an optimization.)
471 ///
472 /// As a special case, we allow setting RAND_TABLE_BIT, even though it
473 /// is normally part of a table_map, not a NodeMap. In this case, it specifies
474 /// that the access path is entirely noncachable, because it depends on
475 /// something nondeterministic or an outer reference, and thus can never be on
476 /// the right side of a hash join, ever.
477 hypergraph::NodeMap parameter_tables{0};
478
479 /// Auxiliary data used by a secondary storage engine while processing the
480 /// access path during optimization and execution. The secondary storage
481 /// engine is free to store any useful information in this member, for example
482 /// extra statistics or cost estimates. The data pointed to is fully owned by
483 /// the secondary storage engine, and it is the responsibility of the
484 /// secondary engine to manage the memory and make sure it is properly
485 /// destroyed.
486 void *secondary_engine_data{nullptr};
487
488 // Accessors for the union below.
489 auto &table_scan() {
490 assert(type == TABLE_SCAN);
491 return u.table_scan;
492 }
493 const auto &table_scan() const {
494 assert(type == TABLE_SCAN);
495 return u.table_scan;
496 }
497 auto &index_scan() {
498 assert(type == INDEX_SCAN);
499 return u.index_scan;
500 }
501 const auto &index_scan() const {
502 assert(type == INDEX_SCAN);
503 return u.index_scan;
504 }
505 auto &ref() {
506 assert(type == REF);
507 return u.ref;
508 }
509 const auto &ref() const {
510 assert(type == REF);
511 return u.ref;
512 }
513 auto &ref_or_null() {
514 assert(type == REF_OR_NULL);
515 return u.ref_or_null;
516 }
517 const auto &ref_or_null() const {
518 assert(type == REF_OR_NULL);
519 return u.ref_or_null;
520 }
521 auto &eq_ref() {
522 assert(type == EQ_REF);
523 return u.eq_ref;
524 }
525 const auto &eq_ref() const {
526 assert(type == EQ_REF);
527 return u.eq_ref;
528 }
529 auto &pushed_join_ref() {
530 assert(type == PUSHED_JOIN_REF);
531 return u.pushed_join_ref;
532 }
533 const auto &pushed_join_ref() const {
534 assert(type == PUSHED_JOIN_REF);
535 return u.pushed_join_ref;
536 }
537 auto &full_text_search() {
538 assert(type == FULL_TEXT_SEARCH);
539 return u.full_text_search;
540 }
541 const auto &full_text_search() const {
542 assert(type == FULL_TEXT_SEARCH);
543 return u.full_text_search;
544 }
545 auto &const_table() {
546 assert(type == CONST_TABLE);
547 return u.const_table;
548 }
549 const auto &const_table() const {
550 assert(type == CONST_TABLE);
551 return u.const_table;
552 }
553 auto &mrr() {
554 assert(type == MRR);
555 return u.mrr;
556 }
557 const auto &mrr() const {
558 assert(type == MRR);
559 return u.mrr;
560 }
561 auto &follow_tail() {
562 assert(type == FOLLOW_TAIL);
563 return u.follow_tail;
564 }
565 const auto &follow_tail() const {
566 assert(type == FOLLOW_TAIL);
567 return u.follow_tail;
568 }
569 auto &index_range_scan() {
570 assert(type == INDEX_RANGE_SCAN);
571 return u.index_range_scan;
572 }
573 const auto &index_range_scan() const {
574 assert(type == INDEX_RANGE_SCAN);
575 return u.index_range_scan;
576 }
577 auto &index_merge() {
578 assert(type == INDEX_MERGE);
579 return u.index_merge;
580 }
581 const auto &index_merge() const {
582 assert(type == INDEX_MERGE);
583 return u.index_merge;
584 }
585 auto &rowid_intersection() {
586 assert(type == ROWID_INTERSECTION);
587 return u.rowid_intersection;
588 }
589 const auto &rowid_intersection() const {
590 assert(type == ROWID_INTERSECTION);
591 return u.rowid_intersection;
592 }
593 auto &rowid_union() {
594 assert(type == ROWID_UNION);
595 return u.rowid_union;
596 }
597 const auto &rowid_union() const {
598 assert(type == ROWID_UNION);
599 return u.rowid_union;
600 }
601 auto &index_skip_scan() {
602 assert(type == INDEX_SKIP_SCAN);
603 return u.index_skip_scan;
604 }
605 const auto &index_skip_scan() const {
606 assert(type == INDEX_SKIP_SCAN);
607 return u.index_skip_scan;
608 }
609 auto &group_index_skip_scan() {
610 assert(type == GROUP_INDEX_SKIP_SCAN);
611 return u.group_index_skip_scan;
612 }
613 const auto &group_index_skip_scan() const {
614 assert(type == GROUP_INDEX_SKIP_SCAN);
615 return u.group_index_skip_scan;
616 }
617 auto &dynamic_index_range_scan() {
618 assert(type == DYNAMIC_INDEX_RANGE_SCAN);
619 return u.dynamic_index_range_scan;
620 }
621 const auto &dynamic_index_range_scan() const {
622 assert(type == DYNAMIC_INDEX_RANGE_SCAN);
623 return u.dynamic_index_range_scan;
624 }
625 auto &materialized_table_function() {
626 assert(type == MATERIALIZED_TABLE_FUNCTION);
627 return u.materialized_table_function;
628 }
629 const auto &materialized_table_function() const {
630 assert(type == MATERIALIZED_TABLE_FUNCTION);
631 return u.materialized_table_function;
632 }
633 auto &unqualified_count() {
634 assert(type == UNQUALIFIED_COUNT);
635 return u.unqualified_count;
636 }
637 const auto &unqualified_count() const {
638 assert(type == UNQUALIFIED_COUNT);
639 return u.unqualified_count;
640 }
641 auto &table_value_constructor() {
642 assert(type == TABLE_VALUE_CONSTRUCTOR);
643 return u.table_value_constructor;
644 }
645 const auto &table_value_constructor() const {
646 assert(type == TABLE_VALUE_CONSTRUCTOR);
647 return u.table_value_constructor;
648 }
649 auto &fake_single_row() {
650 assert(type == FAKE_SINGLE_ROW);
651 return u.fake_single_row;
652 }
653 const auto &fake_single_row() const {
654 assert(type == FAKE_SINGLE_ROW);
655 return u.fake_single_row;
656 }
657 auto &zero_rows() {
658 assert(type == ZERO_ROWS);
659 return u.zero_rows;
660 }
661 const auto &zero_rows() const {
662 assert(type == ZERO_ROWS);
663 return u.zero_rows;
664 }
665 auto &zero_rows_aggregated() {
666 assert(type == ZERO_ROWS_AGGREGATED);
667 return u.zero_rows_aggregated;
668 }
669 const auto &zero_rows_aggregated() const {
670 assert(type == ZERO_ROWS_AGGREGATED);
671 return u.zero_rows_aggregated;
672 }
673 auto &hash_join() {
674 assert(type == HASH_JOIN);
675 return u.hash_join;
676 }
677 const auto &hash_join() const {
678 assert(type == HASH_JOIN);
679 return u.hash_join;
680 }
681 auto &bka_join() {
682 assert(type == BKA_JOIN);
683 return u.bka_join;
684 }
685 const auto &bka_join() const {
686 assert(type == BKA_JOIN);
687 return u.bka_join;
688 }
689 auto &nested_loop_join() {
690 assert(type == NESTED_LOOP_JOIN);
691 return u.nested_loop_join;
692 }
693 const auto &nested_loop_join() const {
694 assert(type == NESTED_LOOP_JOIN);
695 return u.nested_loop_join;
696 }
697 auto &nested_loop_semijoin_with_duplicate_removal() {
698 assert(type == NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL);
699 return u.nested_loop_semijoin_with_duplicate_removal;
700 }
701 const auto &nested_loop_semijoin_with_duplicate_removal() const {
702 assert(type == NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL);
703 return u.nested_loop_semijoin_with_duplicate_removal;
704 }
705 auto &filter() {
706 assert(type == FILTER);
707 return u.filter;
708 }
709 const auto &filter() const {
710 assert(type == FILTER);
711 return u.filter;
712 }
713 auto &sort() {
714 assert(type == SORT);
715 return u.sort;
716 }
717 const auto &sort() const {
718 assert(type == SORT);
719 return u.sort;
720 }
721 auto &aggregate() {
722 assert(type == AGGREGATE);
723 return u.aggregate;
724 }
725 const auto &aggregate() const {
726 assert(type == AGGREGATE);
727 return u.aggregate;
728 }
729 auto &temptable_aggregate() {
730 assert(type == TEMPTABLE_AGGREGATE);
731 return u.temptable_aggregate;
732 }
733 const auto &temptable_aggregate() const {
734 assert(type == TEMPTABLE_AGGREGATE);
735 return u.temptable_aggregate;
736 }
737 auto &limit_offset() {
738 assert(type == LIMIT_OFFSET);
739 return u.limit_offset;
740 }
741 const auto &limit_offset() const {
742 assert(type == LIMIT_OFFSET);
743 return u.limit_offset;
744 }
745 auto &stream() {
746 assert(type == STREAM);
747 return u.stream;
748 }
749 const auto &stream() const {
750 assert(type == STREAM);
751 return u.stream;
752 }
753 auto &materialize() {
754 assert(type == MATERIALIZE);
755 return u.materialize;
756 }
757 const auto &materialize() const {
758 assert(type == MATERIALIZE);
759 return u.materialize;
760 }
761 auto &materialize_information_schema_table() {
762 assert(type == MATERIALIZE_INFORMATION_SCHEMA_TABLE);
763 return u.materialize_information_schema_table;
764 }
765 const auto &materialize_information_schema_table() const {
766 assert(type == MATERIALIZE_INFORMATION_SCHEMA_TABLE);
767 return u.materialize_information_schema_table;
768 }
769 auto &append() {
770 assert(type == APPEND);
771 return u.append;
772 }
773 const auto &append() const {
774 assert(type == APPEND);
775 return u.append;
776 }
777 auto &window() {
778 assert(type == WINDOW);
779 return u.window;
780 }
781 const auto &window() const {
782 assert(type == WINDOW);
783 return u.window;
784 }
785 auto &weedout() {
786 assert(type == WEEDOUT);
787 return u.weedout;
788 }
789 const auto &weedout() const {
790 assert(type == WEEDOUT);
791 return u.weedout;
792 }
793 auto &remove_duplicates() {
794 assert(type == REMOVE_DUPLICATES);
795 return u.remove_duplicates;
796 }
797 const auto &remove_duplicates() const {
798 assert(type == REMOVE_DUPLICATES);
799 return u.remove_duplicates;
800 }
801 auto &remove_duplicates_on_index() {
802 assert(type == REMOVE_DUPLICATES_ON_INDEX);
803 return u.remove_duplicates_on_index;
804 }
805 const auto &remove_duplicates_on_index() const {
806 assert(type == REMOVE_DUPLICATES_ON_INDEX);
807 return u.remove_duplicates_on_index;
808 }
809 auto &alternative() {
810 assert(type == ALTERNATIVE);
811 return u.alternative;
812 }
813 const auto &alternative() const {
814 assert(type == ALTERNATIVE);
815 return u.alternative;
816 }
817 auto &cache_invalidator() {
818 assert(type == CACHE_INVALIDATOR);
819 return u.cache_invalidator;
820 }
821 const auto &cache_invalidator() const {
822 assert(type == CACHE_INVALIDATOR);
823 return u.cache_invalidator;
824 }
825 auto &delete_rows() {
826 assert(type == DELETE_ROWS);
827 return u.delete_rows;
828 }
829 const auto &delete_rows() const {
830 assert(type == DELETE_ROWS);
831 return u.delete_rows;
832 }
833 auto &update_rows() {
834 assert(type == UPDATE_ROWS);
835 return u.update_rows;
836 }
837 const auto &update_rows() const {
838 assert(type == UPDATE_ROWS);
839 return u.update_rows;
840 }
841
842 double num_output_rows() const { return m_num_output_rows; }
843
844 void set_num_output_rows(double val) { m_num_output_rows = val; }
845
846 private:
847 /// Expected number of output rows, -1.0 for unknown.
848 double m_num_output_rows{-1.0};
849
850 // We'd prefer if this could be an std::variant, but we don't have C++17 yet.
851 // It is private to force all access to be through the type-checking
852 // accessors.
853 //
854 // For information about the meaning of each value, see the corresponding
855 // row iterator constructors.
856 union {
857 struct {
858 TABLE *table;
859 } table_scan;
860 struct {
861 TABLE *table;
862 int idx;
863 bool use_order;
864 bool reverse;
865 } index_scan;
866 struct {
867 TABLE *table;
868 Index_lookup *ref;
869 bool use_order;
870 bool reverse;
871 } ref;
872 struct {
873 TABLE *table;
874 Index_lookup *ref;
875 bool use_order;
876 } ref_or_null;
877 struct {
878 TABLE *table;
879 Index_lookup *ref;
880 } eq_ref;
881 struct {
882 TABLE *table;
883 Index_lookup *ref;
884 bool use_order;
885 bool is_unique;
886 } pushed_join_ref;
887 struct {
888 TABLE *table;
889 Index_lookup *ref;
890 bool use_order;
891 bool use_limit;
892 Item_func_match *ft_func;
893 } full_text_search;
894 struct {
895 TABLE *table;
896 Index_lookup *ref;
897 } const_table;
898 struct {
899 TABLE *table;
900 Index_lookup *ref;
901 AccessPath *bka_path;
902 int mrr_flags;
903 bool keep_current_rowid;
904 } mrr;
905 struct {
906 TABLE *table;
907 } follow_tail;
908 struct {
909 // The key part(s) we are scanning on. Note that this may be an array.
910 // You can get the table we are working on by looking into
911 // used_key_parts[0].field->table (it is not stored directly, to avoid
912 // going over the AccessPath size limits).
913 KEY_PART *used_key_part;
914
915 // The actual ranges we are scanning over (originally derived from “key”).
916 // Not a Bounds_checked_array, to save 4 bytes on the length.
917 QUICK_RANGE **ranges;
918 unsigned num_ranges;
919
920 unsigned mrr_flags;
921 unsigned mrr_buf_size;
922
923 // Which index (in the TABLE) we are scanning over, and how many of its
924 // key parts we are using.
925 unsigned index;
926 unsigned num_used_key_parts;
927
928 // If true, the scan can return rows in rowid order.
929 bool can_be_used_for_ror : 1;
930
931 // If true, the scan _should_ return rows in rowid order.
932 // Should only be set if can_be_used_for_ror == true.
933 bool need_rows_in_rowid_order : 1;
934
935 // If true, this plan can be used for index merge scan.
936 bool can_be_used_for_imerge : 1;
937
938 // See row intersection for more details.
939 bool reuse_handler : 1;
940
941 // Whether we are scanning over a geometry key part.
942 bool geometry : 1;
943
944 // Whether we need a reverse scan. Only supported if geometry == false.
945 bool reverse : 1;
946
947 // For a reverse scan, if we are using extended key parts. It is needed
948 // to set correct flags when retrieving records.
949 bool using_extended_key_parts : 1;
950 } index_range_scan;
951 struct {
952 TABLE *table;
953 bool forced_by_hint;
954 bool allow_clustered_primary_key_scan;
955 Mem_root_array<AccessPath *> *children;
956 } index_merge;
957 struct {
958 TABLE *table;
959 Mem_root_array<AccessPath *> *children;
960
961 // Clustered primary key scan, if any.
962 AccessPath *cpk_child;
963
964 bool forced_by_hint;
965 bool retrieve_full_rows;
966 bool need_rows_in_rowid_order;
967
968 // If true, the first child scan should reuse table->file instead of
969 // creating its own. This is true if the intersection is the topmost
970 // range scan, but _not_ if it's below a union. (The reasons for this
971 // are unknown.) It can also be negated by logic involving
972 // retrieve_full_rows and is_covering, again for unknown reasons.
973 //
974 // This is not only for performance; multi-table delete has a hidden
975 // dependency on this behavior when running against certain types of
976 // tables (e.g. MyISAM), as it assumes table->file is correctly positioned
977 // when deleting (and not all table types can transfer the position of one
978 // handler to another by using position()).
979 bool reuse_handler;
980
981 // true if no row retrieval phase is necessary.
982 bool is_covering;
983 } rowid_intersection;
984 struct {
985 TABLE *table;
986 Mem_root_array<AccessPath *> *children;
987 bool forced_by_hint;
988 } rowid_union;
989 struct {
990 TABLE *table;
991 unsigned index;
992 unsigned num_used_key_parts;
993 bool forced_by_hint;
994
995 // Large, and has nontrivial destructors, so split out into
996 // its own allocation.
997 IndexSkipScanParameters *param;
998 } index_skip_scan;
999 struct {
1000 TABLE *table;
1001 unsigned index;
1002 unsigned num_used_key_parts;
1003 bool forced_by_hint;
1004
1005 // Large, so split out into its own allocation.
1006 GroupIndexSkipScanParameters *param;
1007 } group_index_skip_scan;
1008 struct {
1009 TABLE *table;
1010 QEP_TAB *qep_tab; // Used only for buffering.
1011 } dynamic_index_range_scan;
1012 struct {
1013 TABLE *table;
1014 Table_function *table_function;
1015 AccessPath *table_path;
1016 } materialized_table_function;
1017 struct {
1018 } unqualified_count;
1019
1020 struct {
1021 // No members (implicit from the JOIN).
1022 } table_value_constructor;
1023 struct {
1024 // No members.
1025 } fake_single_row;
1026 struct {
1027 // The child is optional. It is only used for keeping track of which
1028 // tables are pruned away by this path, and it is only needed when this
1029 // path is on the inner side of an outer join. See ZeroRowsIterator for
1030 // details. The child of a ZERO_ROWS access path will not be visited by
1031 // WalkAccessPaths(). It will be visited by WalkTablesUnderAccessPath()
1032 // only if called with include_pruned_tables = true. No iterator is
1033 // created for the child, and the child is not shown by EXPLAIN.
1034 AccessPath *child;
1035 // Used for EXPLAIN only.
1036 // TODO(sgunders): make an enum.
1037 const char *cause;
1038 } zero_rows;
1039 struct {
1040 // Used for EXPLAIN only.
1041 // TODO(sgunders): make an enum.
1042 const char *cause;
1043 } zero_rows_aggregated;
1044
1045 struct {
1046 AccessPath *outer, *inner;
1047 const JoinPredicate *join_predicate;
1048 bool allow_spill_to_disk;
1049 bool store_rowids; // Whether we are below a weedout or not.
1050 bool rewrite_semi_to_inner;
1051 table_map tables_to_get_rowid_for;
1052 } hash_join;
1053 struct {
1054 AccessPath *outer, *inner;
1055 Index_lookup *ref;
1056 int mrr_length_per_rec;
1057 float rec_per_key;
1058 bool store_rowids; // Whether we are below a weedout or not.
1059 table_map tables_to_get_rowid_for;
1060 } bka_join;
1061 struct {
1062 AccessPath *outer, *inner;
1063 JoinType join_type; // Somewhat redundant wrt. join_predicate.
1064 bool pfs_batch_mode;
1065 bool already_expanded_predicates;
1066 const JoinPredicate *join_predicate;
1067
1068 // Equijoin filters to apply before the join, if any.
1069 // Indexes into join_predicate->expr->equijoin_conditions.
1070 // Non-equijoin conditions are always applied.
1071 // If already_expanded_predicates is true, do not re-expand.
1072 OverflowBitset equijoin_predicates;
1073
1074 // NOTE: Due to the nontrivial constructor on equijoin_predicates,
1075 // this struct needs an initializer, or the union would not be
1076 // default-constructible. If we need more than one union member
1077 // with such an initializer, we would probably need to change
1078 // equijoin_predicates into a uint64_t type-punned to an OverflowBitset.
1079 } nested_loop_join = {nullptr, nullptr, JoinType::INNER, false, false,
1080 nullptr, {}};
1081 struct {
1082 AccessPath *outer, *inner;
1083 const TABLE *table;
1084 KEY *key;
1085 size_t key_len;
1086 } nested_loop_semijoin_with_duplicate_removal;
1087
1088 struct {
1089 AccessPath *child;
1090 Item *condition;
1091
1092 // This parameter, unlike nearly all others, is not passed to the
1093 // actual iterator. Instead, if true, it signifies that when creating
1094 // the iterator, all materializable subqueries in “condition” should be
1095 // materialized (with any in2exists condition removed first). In the
1096 // very rare case that there are two or more such subqueries, this is
1097 // an all-or-nothing decision, for simplicity.
1098 //
1099 // See FinalizeMaterializedSubqueries().
1100 bool materialize_subqueries;
1101 } filter;
1102 struct {
1103 AccessPath *child;
1104 Filesort *filesort;
1105 table_map tables_to_get_rowid_for;
1106
1107 // If filesort is nullptr: A new filesort will be created at the
1108 // end of optimization, using this order and flags. Otherwise: Only
1109 // used by EXPLAIN.
1110 ORDER *order;
1111 ha_rows limit;
1112 bool remove_duplicates;
1113 bool unwrap_rollup;
1114 bool force_sort_rowids;
1115 } sort;
1116 struct {
1117 AccessPath *child;
1118 bool rollup;
1119 } aggregate;
1120 struct {
1121 AccessPath *subquery_path;
1122 Temp_table_param *temp_table_param;
1123 TABLE *table;
1124 AccessPath *table_path;
1125 int ref_slice;
1126 } temptable_aggregate;
1127 struct {
1128 AccessPath *child;
1129 ha_rows limit;
1130 ha_rows offset;
1131 bool count_all_rows;
1132 bool reject_multiple_rows;
1133 // Only used when the LIMIT is on a UNION with SQL_CALC_FOUND_ROWS.
1134 // See Query_expression::send_records.
1135 ha_rows *send_records_override;
1136 } limit_offset;
1137 struct {
1138 AccessPath *child;
1139 JOIN *join;
1140 Temp_table_param *temp_table_param;
1141 TABLE *table;
1142 bool provide_rowid;
1143 int ref_slice;
1144 } stream;
1145 struct {
1146 // NOTE: The only legal access paths within table_path are
1147 // TABLE_SCAN, REF, REF_OR_NULL, EQ_REF, ALTERNATIVE and
1148 // CONST_TABLE (the latter is somewhat nonsensical).
1149 AccessPath *table_path;
1150
1151 // Large, and has nontrivial destructors, so split out
1152 // into its own allocation.
1153 MaterializePathParameters *param;
1154 /** The total cost of executing the queries that we materialize.*/
1155 double subquery_cost;
1156 } materialize;
1157 struct {
1158 AccessPath *table_path;
1159 Table_ref *table_list;
1160 Item *condition;
1161 } materialize_information_schema_table;
1162 struct {
1163 Mem_root_array<AppendPathParameters> *children;
1164 } append;
1165 struct {
1166 AccessPath *child;
1167 Window *window;
1168 TABLE *temp_table;
1169 Temp_table_param *temp_table_param;
1170 int ref_slice;
1171 bool needs_buffering;
1172 } window;
1173 struct {
1174 AccessPath *child;
1175 SJ_TMP_TABLE *weedout_table;
1176 table_map tables_to_get_rowid_for;
1177 } weedout;
1178 struct {
1179 AccessPath *child;
1180 Item **group_items;
1181 int group_items_size;
1182 } remove_duplicates;
1183 struct {
1184 AccessPath *child;
1185 TABLE *table;
1186 KEY *key;
1187 unsigned loosescan_key_len;
1188 } remove_duplicates_on_index;
1189 struct {
1190 AccessPath *table_scan_path;
1191
1192 // For the ref.
1193 AccessPath *child;
1194 Index_lookup *used_ref;
1195 } alternative;
1196 struct {
1197 AccessPath *child;
1198 const char *name;
1199 } cache_invalidator;
1200 struct {
1201 AccessPath *child;
1202 table_map tables_to_delete_from;
1203 table_map immediate_tables;
1204 } delete_rows;
1205 struct {
1206 AccessPath *child;
1207 table_map tables_to_update;
1208 table_map immediate_tables;
1209 } update_rows;
1210 } u;
1211};
1212static_assert(std::is_trivially_destructible<AccessPath>::value,
1213 "AccessPath must be trivially destructible, as it is allocated "
1214 "on the MEM_ROOT and not wrapped in unique_ptr_destroy_only"
1215 "(because multiple candidates during planning could point to "
1216 "the same access paths, and refcounting would be expensive)");
1217static_assert(sizeof(AccessPath) <= 144,
1218 "We are creating a lot of access paths in the join "
1219 "optimizer, so be sure not to bloat it without noticing. "
1220 "(96 bytes for the base, 48 bytes for the variant.)");
1221
1222inline void CopyBasicProperties(const AccessPath &from, AccessPath *to) {
1223 to->set_num_output_rows(from.num_output_rows());
1224 to->cost = from.cost;
1225 to->init_cost = from.init_cost;
1226 to->init_once_cost = from.init_once_cost;
1227 to->parameter_tables = from.parameter_tables;
1228 to->safe_for_rowid = from.safe_for_rowid;
1229 to->ordering_state = from.ordering_state;
1230}
1231
1232// Trivial factory functions for all of the types of access paths above.
1233
1234inline AccessPath *NewTableScanAccessPath(THD *thd, TABLE *table,
1235 bool count_examined_rows) {
1236 AccessPath *path = new (thd->mem_root) AccessPath;
1237 path->type = AccessPath::TABLE_SCAN;
1238 path->count_examined_rows = count_examined_rows;
1239 path->table_scan().table = table;
1240 return path;
1241}
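
// Illustrative composition of these factories (hypothetical thd, table and
// where_condition variables): a table scan wrapped in a FILTER path, the
// shape a simple single-table WHERE query takes:
//
// AccessPath *scan =
//     NewTableScanAccessPath(thd, table, /*count_examined_rows=*/true);
// AccessPath *filtered = NewFilterAccessPath(thd, scan, where_condition);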
1242
1243inline AccessPath *NewIndexScanAccessPath(THD *thd, TABLE *table, int idx,
1244 bool use_order, bool reverse,
1245 bool count_examined_rows) {
1246 AccessPath *path = new (thd->mem_root) AccessPath;
1247 path->type = AccessPath::INDEX_SCAN;
1248 path->count_examined_rows = count_examined_rows;
1249 path->index_scan().table = table;
1250 path->index_scan().idx = idx;
1251 path->index_scan().use_order = use_order;
1252 path->index_scan().reverse = reverse;
1253 return path;
1254}
1255
1256inline AccessPath *NewRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref,
1257 bool use_order, bool reverse,
1258 bool count_examined_rows) {
1259 AccessPath *path = new (thd->mem_root) AccessPath;
1260 path->type = AccessPath::REF;
1261 path->count_examined_rows = count_examined_rows;
1262 path->ref().table = table;
1263 path->ref().ref = ref;
1264 path->ref().use_order = use_order;
1265 path->ref().reverse = reverse;
1266 return path;
1267}
1268
1269inline AccessPath *NewRefOrNullAccessPath(THD *thd, TABLE *table,
1270 Index_lookup *ref, bool use_order,
1271 bool count_examined_rows) {
1272 AccessPath *path = new (thd->mem_root) AccessPath;
1273 path->type = AccessPath::REF_OR_NULL;
1274 path->count_examined_rows = count_examined_rows;
1275 path->ref_or_null().table = table;
1276 path->ref_or_null().ref = ref;
1277 path->ref_or_null().use_order = use_order;
1278 return path;
1279}
1280
1281inline AccessPath *NewEQRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref,
1282 bool count_examined_rows) {
1283 AccessPath *path = new (thd->mem_root) AccessPath;
1284 path->type = AccessPath::EQ_REF;
1285 path->count_examined_rows = count_examined_rows;
1286 path->eq_ref().table = table;
1287 path->eq_ref().ref = ref;
1288 return path;
1289}
1290
1291inline AccessPath *NewPushedJoinRefAccessPath(THD *thd, TABLE *table,
1292 Index_lookup *ref, bool use_order,
1293 bool is_unique,
1294 bool count_examined_rows) {
1295 AccessPath *path = new (thd->mem_root) AccessPath;
1296 path->type = AccessPath::PUSHED_JOIN_REF;
1297 path->count_examined_rows = count_examined_rows;
1298 path->pushed_join_ref().table = table;
1299 path->pushed_join_ref().ref = ref;
1300 path->pushed_join_ref().use_order = use_order;
1301 path->pushed_join_ref().is_unique = is_unique;
1302 return path;
1303}
1304
1305inline AccessPath *NewFullTextSearchAccessPath(THD *thd, TABLE *table,
1306 Index_lookup *ref,
1307 Item_func_match *ft_func,
1308 bool use_order, bool use_limit,
1309 bool count_examined_rows) {
1310 AccessPath *path = new (thd->mem_root) AccessPath;
1311 path->type = AccessPath::FULL_TEXT_SEARCH;
1312 path->count_examined_rows = count_examined_rows;
1313 path->full_text_search().table = table;
1314 path->full_text_search().ref = ref;
1315 path->full_text_search().use_order = use_order;
1316 path->full_text_search().use_limit = use_limit;
1317 path->full_text_search().ft_func = ft_func;
1318 return path;
1319}
1320
1321inline AccessPath *NewConstTableAccessPath(THD *thd, TABLE *table,
1322 Index_lookup *ref,
1323 bool count_examined_rows) {
1324 AccessPath *path = new (thd->mem_root) AccessPath;
1325 path->type = AccessPath::CONST_TABLE;
1326 path->count_examined_rows = count_examined_rows;
1327 path->set_num_output_rows(1.0);
1328 path->cost = 0.0;
1329 path->init_cost = 0.0;
1330 path->init_once_cost = 0.0;
1331 path->const_table().table = table;
1332 path->const_table().ref = ref;
1333 return path;
1334}
1335
1336inline AccessPath *NewMRRAccessPath(THD *thd, TABLE *table, Index_lookup *ref,
1337 int mrr_flags) {
1338 AccessPath *path = new (thd->mem_root) AccessPath;
1339 path->type = AccessPath::MRR;
1340 path->mrr().table = table;
1341 path->mrr().ref = ref;
1342 path->mrr().mrr_flags = mrr_flags;
1343
1344 // This will be filled in when the BKA iterator is created.
1345 path->mrr().bka_path = nullptr;
1346
1347 return path;
1348}
1349
1350inline AccessPath *NewFollowTailAccessPath(THD *thd, TABLE *table,
1351 bool count_examined_rows) {
1352 AccessPath *path = new (thd->mem_root) AccessPath;
1353 path->type = AccessPath::FOLLOW_TAIL;
1354 path->count_examined_rows = count_examined_rows;
1355 path->follow_tail().table = table;
1356 return path;
1357}
1358
1359inline AccessPath *NewDynamicIndexRangeScanAccessPath(
1360 THD *thd, TABLE *table, QEP_TAB *qep_tab, bool count_examined_rows) {
1361 AccessPath *path = new (thd->mem_root) AccessPath;
1362 path->type = AccessPath::DYNAMIC_INDEX_RANGE_SCAN;
1363 path->count_examined_rows = count_examined_rows;
1364 path->dynamic_index_range_scan().table = table;
1365 path->dynamic_index_range_scan().qep_tab = qep_tab;
1366 return path;
1367}
1368
1369inline AccessPath *NewMaterializedTableFunctionAccessPath(
1370 THD *thd, TABLE *table, Table_function *table_function,
1371 AccessPath *table_path) {
1372 AccessPath *path = new (thd->mem_root) AccessPath;
1373 path->type = AccessPath::MATERIALIZED_TABLE_FUNCTION;
1374 path->materialized_table_function().table = table;
1375 path->materialized_table_function().table_function = table_function;
1376 path->materialized_table_function().table_path = table_path;
1377 return path;
1378}
1379
1380inline AccessPath *NewUnqualifiedCountAccessPath(THD *thd) {
1381 AccessPath *path = new (thd->mem_root) AccessPath;
1382 path->type = AccessPath::UNQUALIFIED_COUNT;
1383 return path;
1384}
1385
1386inline AccessPath *NewTableValueConstructorAccessPath(THD *thd) {
1387 AccessPath *path = new (thd->mem_root) AccessPath;
1388 path->type = AccessPath::TABLE_VALUE_CONSTRUCTOR;
1389 // The iterator keeps track of which row it is at in examined_rows,
1390 // so we always need to give it the pointer.
1391 path->count_examined_rows = true;
1392 return path;
1393}
1394
1395inline AccessPath *NewNestedLoopSemiJoinWithDuplicateRemovalAccessPath(
1396 THD *thd, AccessPath *outer, AccessPath *inner, const TABLE *table,
1397 KEY *key, size_t key_len) {
1398 AccessPath *path = new (thd->mem_root) AccessPath;
1399 path->type = AccessPath::NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL;
1400 path->nested_loop_semijoin_with_duplicate_removal().outer = outer;
1401 path->nested_loop_semijoin_with_duplicate_removal().inner = inner;
1402 path->nested_loop_semijoin_with_duplicate_removal().table = table;
1403 path->nested_loop_semijoin_with_duplicate_removal().key = key;
1404 path->nested_loop_semijoin_with_duplicate_removal().key_len = key_len;
1405 return path;
1406}
1407
1408inline AccessPath *NewFilterAccessPath(THD *thd, AccessPath *child,
1409 Item *condition) {
1410 AccessPath *path = new (thd->mem_root) AccessPath;
1411 path->type = AccessPath::FILTER;
1412 path->filter().child = child;
1413 path->filter().condition = condition;
1414 path->filter().materialize_subqueries = false;
1415 return path;
1416}
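
// Per the convention described at filter_predicates above, this factory
// always starts the filter out with materialize_subqueries = false; a FILTER
// node that should materialize its subqueries must be set up explicitly:
//
// AccessPath *filter = NewFilterAccessPath(thd, child, condition);
// filter->filter().materialize_subqueries = true;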
1417
1418// Not inline, because it needs access to filesort internals
1419// (which are forward-declared in this file).
1420AccessPath *NewSortAccessPath(THD *thd, AccessPath *child, Filesort *filesort,
1421 ORDER *order, bool count_examined_rows);
1422
1423inline AccessPath *NewAggregateAccessPath(THD *thd, AccessPath *child,
1424 bool rollup) {
1425 AccessPath *path = new (thd->mem_root) AccessPath;
1426 path->type = AccessPath::AGGREGATE;
1427 path->aggregate().child = child;
1428 path->aggregate().rollup = rollup;
1429 return path;
1430}
1431
1432inline AccessPath *NewTemptableAggregateAccessPath(
1433 THD *thd, AccessPath *subquery_path, Temp_table_param *temp_table_param,
1434 TABLE *table, AccessPath *table_path, int ref_slice) {
1435 AccessPath *path = new (thd->mem_root) AccessPath;
1436 path->type = AccessPath::TEMPTABLE_AGGREGATE;
1437 path->temptable_aggregate().subquery_path = subquery_path;
1438 path->temptable_aggregate().temp_table_param = temp_table_param;
1439 path->temptable_aggregate().table = table;
1440 path->temptable_aggregate().table_path = table_path;
1441 path->temptable_aggregate().ref_slice = ref_slice;
1442 return path;
1443}
1444
1445inline AccessPath *NewLimitOffsetAccessPath(THD *thd, AccessPath *child,
1446 ha_rows limit, ha_rows offset,
1447 bool count_all_rows,
1448 bool reject_multiple_rows,
1449 ha_rows *send_records_override) {
1451 AccessPath *path = new (thd->mem_root) AccessPath;
1452 path->type = AccessPath::LIMIT_OFFSET;
1453 path->immediate_update_delete_table = child->immediate_update_delete_table;
1454 path->limit_offset().child = child;
1455 path->limit_offset().limit = limit;
1456 path->limit_offset().offset = offset;
1457 path->limit_offset().count_all_rows = count_all_rows;
1458 path->limit_offset().reject_multiple_rows = reject_multiple_rows;
1459 path->limit_offset().send_records_override = send_records_override;
1460 path->ordering_state = child->ordering_state;
1462 return path;
1463}
1464
1465inline AccessPath *NewFakeSingleRowAccessPath(THD *thd,
1466 bool count_examined_rows) {
1467 AccessPath *path = new (thd->mem_root) AccessPath;
1468 path->type = AccessPath::FAKE_SINGLE_ROW;
1469 path->count_examined_rows = count_examined_rows;
1470 path->set_num_output_rows(1.0);
1471 path->cost = 0.0;
1472 path->init_cost = 0.0;
1473 path->init_once_cost = 0.0;
1474 return path;
1475}
1476
1477inline AccessPath *NewZeroRowsAccessPath(THD *thd, AccessPath *child,
1478 const char *cause) {
1479 AccessPath *path = new (thd->mem_root) AccessPath;
1480 path->type = AccessPath::ZERO_ROWS;
1481 path->zero_rows().child = child;
1482 path->zero_rows().cause = cause;
1483 path->set_num_output_rows(0.0);
1484 path->cost = 0.0;
1485 path->init_cost = 0.0;
1486 path->init_once_cost = 0.0;
1487 path->num_output_rows_before_filter = 0.0;
1488 path->cost_before_filter = 0.0;
1489 return path;
1490}
1491
1492inline AccessPath *NewZeroRowsAccessPath(THD *thd, const char *cause) {
1493 return NewZeroRowsAccessPath(thd, /*child=*/nullptr, cause);
1494}
1495
1496inline AccessPath *NewZeroRowsAggregatedAccessPath(THD *thd,
1497 const char *cause) {
1498 AccessPath *path = new (thd->mem_root) AccessPath;
1499 path->type = AccessPath::ZERO_ROWS_AGGREGATED;
1500 path->zero_rows_aggregated().cause = cause;
1501 path->set_num_output_rows(1.0);
1502 path->cost = 0.0;
1503 path->init_cost = 0.0;
1504 return path;
1505}
1506
1507inline AccessPath *NewStreamingAccessPath(THD *thd, AccessPath *child,
1508 JOIN *join,
1509 Temp_table_param *temp_table_param,
1510 TABLE *table, int ref_slice) {
1511 AccessPath *path = new (thd->mem_root) AccessPath;
1512 path->type = AccessPath::STREAM;
1513 path->stream().child = child;
1514 path->stream().join = join;
1515 path->stream().temp_table_param = temp_table_param;
1516 path->stream().table = table;
1517 path->stream().ref_slice = ref_slice;
1518 // Will be set later if we get a weedout access path as parent.
1519 path->stream().provide_rowid = false;
1520 return path;
1521}
1522
1523inline Mem_root_array<MaterializePathParameters::QueryBlock>
1524SingleMaterializeQueryBlock(THD *thd, AccessPath *path, int select_number,
1525 JOIN *join, bool copy_items,
1526 Temp_table_param *temp_table_param) {
1527 assert(path != nullptr);
1528 Mem_root_array<MaterializePathParameters::QueryBlock> array(thd->mem_root, 1);
1529 MaterializePathParameters::QueryBlock &query_block = array[0];
1530 query_block.subquery_path = path;
1531 query_block.select_number = select_number;
1532 query_block.join = join;
1533 query_block.disable_deduplication_by_hash_field = false;
1534 query_block.copy_items = copy_items;
1535 query_block.temp_table_param = temp_table_param;
1536 return array;
1537}
1538
1539inline AccessPath *NewMaterializeAccessPath(
1540 THD *thd,
1541 Mem_root_array<MaterializePathParameters::QueryBlock> query_blocks,
1542 Mem_root_array<const AccessPath *> *invalidators, TABLE *table,
1543 AccessPath *table_path, Common_table_expr *cte, Query_expression *unit,
1544 int ref_slice, bool rematerialize, ha_rows limit_rows,
1545 bool reject_multiple_rows) {
1546 MaterializePathParameters *param =
1547 new (thd->mem_root) MaterializePathParameters;
1548 param->query_blocks = std::move(query_blocks);
1549 if (rematerialize) {
1550 // There's no point in adding invalidators if we're rematerializing
1551 // every time anyway.
1552 param->invalidators = nullptr;
1553 } else {
1554 param->invalidators = invalidators;
1555 }
1556 param->table = table;
1557 param->cte = cte;
1558 param->unit = unit;
1559 param->ref_slice = ref_slice;
1560 param->rematerialize = rematerialize;
1561 param->limit_rows = (table == nullptr || table->is_union_or_table()
1562 ? limit_rows
1563 :
1564 // INTERSECT, EXCEPT: Enforced by TableScanIterator,
1565 // see its constructor
1566 HA_POS_ERROR);
1567 param->reject_multiple_rows = reject_multiple_rows;
1568
1569#ifndef NDEBUG
1570 for (MaterializePathParameters::QueryBlock &query_block :
1571 param->query_blocks) {
1572 assert(query_block.subquery_path != nullptr);
1573 }
1574#endif
1575
1576 AccessPath *path = new (thd->mem_root) AccessPath;
1577 path->type = AccessPath::MATERIALIZE;
1578 path->materialize().table_path = table_path;
1579 path->materialize().param = param;
1580 path->materialize().subquery_cost = -1.0;
1581 if (rematerialize) {
1582 path->safe_for_rowid = AccessPath::SAFE_IF_SCANNED_ONCE;
1583 } else {
1584 // The default; this is just to be explicit in the code.
1585 path->safe_for_rowid = AccessPath::SAFE;
1586 }
1587 return path;
1588}
1589
1590inline AccessPath *NewMaterializeInformationSchemaTableAccessPath(
1591 THD *thd, AccessPath *table_path, Table_ref *table_list, Item *condition) {
1592 AccessPath *path = new (thd->mem_root) AccessPath;
1593 path->type = AccessPath::MATERIALIZE_INFORMATION_SCHEMA_TABLE;
1594 path->materialize_information_schema_table().table_path = table_path;
1595 path->materialize_information_schema_table().table_list = table_list;
1596 path->materialize_information_schema_table().condition = condition;
1597 return path;
1598}
1599
1600// The Mem_root_array must be allocated on a MEM_ROOT that lives at least for as
1601// long as the access path.
1602inline AccessPath *NewAppendAccessPath(
1603 THD *thd, Mem_root_array<AppendPathParameters> *children) {
1604 AccessPath *path = new (thd->mem_root) AccessPath;
1605 path->type = AccessPath::APPEND;
1606 path->append().children = children;
1607 path->cost = 0.0;
1608 path->init_cost = 0.0;
1609 path->init_once_cost = 0.0;
1610 for (const AppendPathParameters &child : *children) {
1611 path->cost += child.path->cost;
1612 path->init_cost += child.path->init_cost;
1613 path->init_once_cost += child.path->init_once_cost;
1614 }
1615 return path;
1616}
1617
1618inline AccessPath *NewWindowAccessPath(THD *thd, AccessPath *child,
1619 Window *window,
1620 Temp_table_param *temp_table_param,
1621 int ref_slice, bool needs_buffering) {
1622 AccessPath *path = new (thd->mem_root) AccessPath;
1623 path->type = AccessPath::WINDOW;
1624 path->window().child = child;
1625 path->window().window = window;
1626 path->window().temp_table = nullptr;
1627 path->window().temp_table_param = temp_table_param;
1628 path->window().ref_slice = ref_slice;
1629 path->window().needs_buffering = needs_buffering;
1630 return path;
1631}
1632
1633inline AccessPath *NewWeedoutAccessPath(THD *thd, AccessPath *child,
1634 SJ_TMP_TABLE *weedout_table) {
1635 AccessPath *path = new (thd->mem_root) AccessPath;
1636 path->type = AccessPath::WEEDOUT;
1637 path->weedout().child = child;
1638 path->weedout().weedout_table = weedout_table;
1639 path->weedout().tables_to_get_rowid_for =
1640 0; // Must be handled by the caller.
1641 return path;
1642}
1643
1644inline AccessPath *NewRemoveDuplicatesAccessPath(THD *thd, AccessPath *child,
1645 Item **group_items,
1646 int group_items_size) {
1647 AccessPath *path = new (thd->mem_root) AccessPath;
1648 path->type = AccessPath::REMOVE_DUPLICATES;
1649 path->remove_duplicates().child = child;
1650 path->remove_duplicates().group_items = group_items;
1651 path->remove_duplicates().group_items_size = group_items_size;
1652 return path;
1653}
1654
1655inline AccessPath *NewRemoveDuplicatesOnIndexAccessPath(
1656 THD *thd, AccessPath *child, TABLE *table, KEY *key,
1657 unsigned loosescan_key_len) {
1658 AccessPath *path = new (thd->mem_root) AccessPath;
1659 path->type = AccessPath::REMOVE_DUPLICATES_ON_INDEX;
1660 path->remove_duplicates_on_index().child = child;
1661 path->remove_duplicates_on_index().table = table;
1662 path->remove_duplicates_on_index().key = key;
1663 path->remove_duplicates_on_index().loosescan_key_len = loosescan_key_len;
1664 return path;
1665}
1666
1667inline AccessPath *NewAlternativeAccessPath(THD *thd, AccessPath *child,
1668 AccessPath *table_scan_path,
1669 Index_lookup *used_ref) {
1670 AccessPath *path = new (thd->mem_root) AccessPath;
1671 path->type = AccessPath::ALTERNATIVE;
1672 path->alternative().table_scan_path = table_scan_path;
1673 path->alternative().child = child;
1674 path->alternative().used_ref = used_ref;
1675 return path;
1676}
1677
1678inline AccessPath *NewInvalidatorAccessPath(THD *thd, AccessPath *child,
1679 const char *name) {
1680 AccessPath *path = new (thd->mem_root) AccessPath;
1681 path->type = AccessPath::CACHE_INVALIDATOR;
1682 path->cache_invalidator().child = child;
1683 path->cache_invalidator().name = name;
1684 return path;
1685}
1686
1687AccessPath *NewDeleteRowsAccessPath(THD *thd, AccessPath *child,
1688 table_map delete_tables,
1689 table_map immediate_tables);
1690
1691AccessPath *NewUpdateRowsAccessPath(THD *thd, AccessPath *child,
1692 table_map delete_tables,
1693 table_map immediate_tables);
1694
1695/**
1696 Modifies "path" and the paths below it so that they provide row IDs for
1697 all tables.
1698 */
1699void FindTablesToGetRowidFor(AccessPath *path);
1700
1701/**
1702 If the path is a FILTER path marked that subqueries are to be materialized,
1703 do so. If not, do nothing.
1704
1705 It is important that this is not called until the entire plan is ready;
1706 not just when planning a single query block. The reason is that a query
1707 block A with materializable subqueries may itself be part of a materializable
1708 subquery B, so if one calls this when planning A, the subqueries in A will
1709 irrevocably be materialized, even if that is not the optimal plan given B.
1710 Thus, this is done when creating iterators.
1711 */
1712bool FinalizeMaterializedSubqueries(THD *thd, JOIN *join, AccessPath *path);
1713
1714unique_ptr_destroy_only<RowIterator> CreateIteratorFromAccessPath(
1715 THD *thd, MEM_ROOT *mem_root, AccessPath *path, JOIN *join,
1716 bool eligible_for_batch_mode);
1717
1718// A short form of CreateIteratorFromAccessPath() that implicitly uses the THD's
1719// MEM_ROOT for storage, which is nearly always what you want. (The only caller
1720// that does anything else is DynamicRangeIterator.)
1721inline unique_ptr_destroy_only<RowIterator> CreateIteratorFromAccessPath(
1722 THD *thd, AccessPath *path, JOIN *join, bool eligible_for_batch_mode) {
1723 return CreateIteratorFromAccessPath(thd, thd->mem_root, path, join,
1724 eligible_for_batch_mode);
1725}
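
// Illustrative use of the short form above, assuming an already-finalized
// plan in "path" belonging to "join" (error handling abbreviated):
//
// unique_ptr_destroy_only<RowIterator> iterator = CreateIteratorFromAccessPath(
//     thd, path, join, /*eligible_for_batch_mode=*/true);
// if (iterator == nullptr || iterator->Init()) return true; // Error.
// while (iterator->Read() == 0) {
//   // Process the row just read into the tables' record buffers.
// }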
1726
1727void SetCostOnTableAccessPath(const Cost_model_server &cost_model,
1728 const POSITION *pos, bool is_after_filter,
1729 AccessPath *path);
1730
1731/**
1732 Return the TABLE* referred from 'path' if it is a basic access path,
1733 else a nullptr is returned. Temporary tables, such as those used by
1734 sorting, aggregate and subquery materialization are not returned.
1735*/
1736TABLE *GetBasicTable(const AccessPath *path);
1737
1738/**
1739 Returns a map of all tables read when `path` or any of its children are
1740 executed. Only iterators that are part of the same query block as `path`
1741 are considered.
1742
1743 If a table is read that doesn't have a map, specifically the temporary
1744 tables made as part of materialization within the same query block,
1745 RAND_TABLE_BIT will be set as a convention and none of that access path's
1746 children will be included in the map. In this case, the caller will need to
1747 manually go in and find said access path, to ask it for its TABLE object.
1748
1749 If include_pruned_tables = true, tables that are hidden under a ZERO_ROWS
1750 access path (ie., pruned away due to impossible join conditions) will be
1751 included in the map. This is normally what you want, as those tables need to
1752 be included whenever you store NULL flags and the likes, but if you don't
1753 want them (perhaps to specifically check for conditions referring to pruned
1754 tables), you can set it to false.
1755 */
1756table_map GetUsedTableMap(const AccessPath *path, bool include_pruned_tables);
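
// Hypothetical usage, testing whether "path" reads table t1, with tables
// pruned away by ZERO_ROWS paths still counted as read:
//
// const table_map used = GetUsedTableMap(path, /*include_pruned_tables=*/true);
// const bool reads_t1 = (used & t1->pos_in_table_list->map()) != 0;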
1757
1758/**
1759 Find the list of all tables used by this root, stopping at materializations.
1760 Used for knowing which tables to sort.
1761 */
1762Mem_root_array<TABLE *> CollectTables(THD *thd, AccessPath *root_path);
1763
1764/**
1765 For each access path in the (sub)tree rooted at “path”, expand any use of
1766 “filter_predicates” into newly-inserted FILTER access paths, using the given
1767 predicate list. This is used after finding an optimal set of access paths,
1768 to normalize the tree so that the remaining consumers do not need to worry
1769 about filter_predicates and cost_before_filter.
1770
1771 “join” is the join that “path” is part of.
1772 */
1773void ExpandFilterAccessPaths(THD *thd, AccessPath *path, const JOIN *join,
1774 const Mem_root_array<Predicate> &predicates,
1775 unsigned num_where_predicates);
1776
1777/**
1778 Extracts the Item expression from the given “filter_predicates” corresponding
1779 to the given “mask”.
1780 */
1781Item *ConditionFromFilterPredicates(
1782 const Mem_root_array<Predicate> &predicates, OverflowBitset mask,
1783 int num_where_predicates);
1784
1785/// Like ExpandFilterAccessPaths(), but expands only the single access path
1786/// at “path”.
1787void ExpandSingleFilterAccessPath(THD *thd, AccessPath *path, const JOIN *join,
1788 const Mem_root_array<Predicate> &predicates,
1789 unsigned num_where_predicates);
1790
1791/// Returns the tables that are part of a hash join.
1792table_map GetHashJoinTables(AccessPath *path);
1793
1794#endif // SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
AccessPath * NewStreamingAccessPath(THD *thd, AccessPath *child, JOIN *join, Temp_table_param *temp_table_param, TABLE *table, int ref_slice)
Definition: access_path.h:1507
AccessPath * NewRemoveDuplicatesAccessPath(THD *thd, AccessPath *child, Item **group_items, int group_items_size)
Definition: access_path.h:1644
AccessPath * NewInvalidatorAccessPath(THD *thd, AccessPath *child, const char *name)
Definition: access_path.h:1678
AccessPath * NewRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool reverse, bool count_examined_rows)
Definition: access_path.h:1256
AccessPath * NewDeleteRowsAccessPath(THD *thd, AccessPath *child, table_map delete_tables, table_map immediate_tables)
Definition: access_path.cc:100
bool FinalizeMaterializedSubqueries(THD *thd, JOIN *join, AccessPath *path)
If the path is a FILTER path marked that subqueries are to be materialized, do so.
Definition: access_path.cc:284
void CopyBasicProperties(const AccessPath &from, AccessPath *to)
Definition: access_path.h:1222
void ExpandSingleFilterAccessPath(THD *thd, AccessPath *path, const JOIN *join, const Mem_root_array< Predicate > &predicates, unsigned num_where_predicates)
Like ExpandFilterAccessPaths(), but expands only the single access path at “path”.
Definition: access_path.cc:1325
AccessPath * NewFullTextSearchAccessPath(THD *thd, TABLE *table, Index_lookup *ref, Item_func_match *ft_func, bool use_order, bool use_limit, bool count_examined_rows)
Definition: access_path.h:1305
AccessPath * NewTemptableAggregateAccessPath(THD *thd, AccessPath *subquery_path, Temp_table_param *temp_table_param, TABLE *table, AccessPath *table_path, int ref_slice)
Definition: access_path.h:1432
table_map GetHashJoinTables(AccessPath *path)
Returns the tables that are part of a hash join.
Definition: access_path.cc:1447
AccessPath * NewDynamicIndexRangeScanAccessPath(THD *thd, TABLE *table, QEP_TAB *qep_tab, bool count_examined_rows)
Definition: access_path.h:1359
AccessPath * NewZeroRowsAggregatedAccessPath(THD *thd, const char *cause)
Definition: access_path.h:1496
AccessPath * NewAlternativeAccessPath(THD *thd, AccessPath *child, AccessPath *table_scan_path, Index_lookup *used_ref)
Definition: access_path.h:1667
AccessPath * NewUpdateRowsAccessPath(THD *thd, AccessPath *child, table_map delete_tables, table_map immediate_tables)
Definition: access_path.cc:112
AccessPath * NewMRRAccessPath(THD *thd, TABLE *table, Index_lookup *ref, int mrr_flags)
Definition: access_path.h:1336
table_map GetUsedTableMap(const AccessPath *path, bool include_pruned_tables)
Returns a map of all tables read when path or any of its children are executed.
Definition: access_path.cc:214
AccessPath * NewAppendAccessPath(THD *thd, Mem_root_array< AppendPathParameters > *children)
Definition: access_path.h:1602
AccessPath * NewNestedLoopSemiJoinWithDuplicateRemovalAccessPath(THD *thd, AccessPath *outer, AccessPath *inner, const TABLE *table, KEY *key, size_t key_len)
Definition: access_path.h:1395
AccessPath * NewRemoveDuplicatesOnIndexAccessPath(THD *thd, AccessPath *child, TABLE *table, KEY *key, unsigned loosescan_key_len)
Definition: access_path.h:1655
AccessPath * NewFakeSingleRowAccessPath(THD *thd, bool count_examined_rows)
Definition: access_path.h:1465
Item * ConditionFromFilterPredicates(const Mem_root_array< Predicate > &predicates, OverflowBitset mask, int num_where_predicates)
Extracts the Item expression from the given “filter_predicates” corresponding to the given “mask”.
Definition: access_path.cc:1314
AccessPath * NewFilterAccessPath(THD *thd, AccessPath *child, Item *condition)
Definition: access_path.h:1408
AccessPath * NewSortAccessPath(THD *thd, AccessPath *child, Filesort *filesort, ORDER *order, bool count_examined_rows)
Definition: access_path.cc:65
AccessPath * NewMaterializedTableFunctionAccessPath(THD *thd, TABLE *table, Table_function *table_function, AccessPath *table_path)
Definition: access_path.h:1369
AccessPath * NewMaterializeAccessPath(THD *thd, Mem_root_array< MaterializePathParameters::QueryBlock > query_blocks, Mem_root_array< const AccessPath * > *invalidators, TABLE *table, AccessPath *table_path, Common_table_expr *cte, Query_expression *unit, int ref_slice, bool rematerialize, ha_rows limit_rows, bool reject_multiple_rows)
Definition: access_path.h:1539
Mem_root_array< TABLE * > CollectTables(THD *thd, AccessPath *root_path)
Find the list of all tables used by this root, stopping at materializations.
Definition: access_path.cc:245
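A small hypothetical debugging sketch using the returned array (TABLE::alias assumed to hold the table's alias):

  // Print the tables reachable from `root_path`, stopping at
  // materializations as documented above.
  for (const TABLE *tbl : CollectTables(thd, root_path)) {
    fprintf(stderr, "uses table: %s\n", tbl->alias);
  }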
AccessPath * NewTableScanAccessPath(THD *thd, TABLE *table, bool count_examined_rows)
Definition: access_path.h:1234
void ExpandFilterAccessPaths(THD *thd, AccessPath *path, const JOIN *join, const Mem_root_array< Predicate > &predicates, unsigned num_where_predicates)
For each access path in the (sub)tree rooted at “path”, expand any use of “filter_predicates” into ne...
Definition: access_path.cc:1435
AccessPath * NewMaterializeInformationSchemaTableAccessPath(THD *thd, AccessPath *table_path, Table_ref *table_list, Item *condition)
Definition: access_path.h:1590
void FindTablesToGetRowidFor(AccessPath *path)
Modifies "path" and the paths below it so that they provide row IDs for all tables.
Definition: access_path.cc:1170
Mem_root_array< MaterializePathParameters::QueryBlock > SingleMaterializeQueryBlock(THD *thd, AccessPath *path, int select_number, JOIN *join, bool copy_items, Temp_table_param *temp_table_param)
Definition: access_path.h:1524
AccessPath * NewAggregateAccessPath(THD *thd, AccessPath *child, bool rollup)
Definition: access_path.h:1423
TABLE * GetBasicTable(const AccessPath *path)
Return the TABLE* referred to by 'path' if it is a basic access path; otherwise return nullptr.
Definition: access_path.cc:161
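A minimal sketch of the intended pattern; `path` is any AccessPath:

  // Non-null only for single-table paths (table scan, ref, range, ...);
  // composite paths such as joins, sorts and filters yield nullptr.
  if (TABLE *tbl = GetBasicTable(path)) {
    // handle the single-table case
  }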
AccessPath * NewRefOrNullAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool count_examined_rows)
Definition: access_path.h:1269
AccessPath * NewLimitOffsetAccessPath(THD *thd, AccessPath *child, ha_rows limit, ha_rows offset, bool count_all_rows, bool reject_multiple_rows, ha_rows *send_records_override)
Definition: access_path.h:1445
AccessPath * NewEQRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool count_examined_rows)
Definition: access_path.h:1281
AccessPath * NewWindowAccessPath(THD *thd, AccessPath *child, Window *window, Temp_table_param *temp_table_param, int ref_slice, bool needs_buffering)
Definition: access_path.h:1618
AccessPath * NewFollowTailAccessPath(THD *thd, TABLE *table, bool count_examined_rows)
Definition: access_path.h:1350
AccessPath * NewTableValueConstructorAccessPath(THD *thd)
Definition: access_path.h:1386
AccessPath * NewConstTableAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool count_examined_rows)
Definition: access_path.h:1321
unique_ptr_destroy_only< RowIterator > CreateIteratorFromAccessPath(THD *thd, MEM_ROOT *mem_root, AccessPath *path, JOIN *join, bool eligible_for_batch_mode)
Definition: access_path.cc:354
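These constructors compose into small plan trees that are then lowered to iterators. Below is a minimal, hedged sketch (a valid THD *thd, TABLE *t1 and Item *cond are assumed; error handling is mostly omitted; Init() returning true and Read() returning nonzero are assumed to signal error/EOF per the RowIterator interface):

  // Scan t1, keep rows matching cond, return at most 10 of them.
  AccessPath *scan =
      NewTableScanAccessPath(thd, t1, /*count_examined_rows=*/true);
  AccessPath *filtered = NewFilterAccessPath(thd, scan, cond);
  AccessPath *limited = NewLimitOffsetAccessPath(
      thd, filtered, /*limit=*/10, /*offset=*/0, /*count_all_rows=*/false,
      /*reject_multiple_rows=*/false, /*send_records_override=*/nullptr);

  unique_ptr_destroy_only<RowIterator> it = CreateIteratorFromAccessPath(
      thd, thd->mem_root, limited, /*join=*/nullptr,
      /*eligible_for_batch_mode=*/true);
  if (it == nullptr || it->Init()) return true;  // error
  while (it->Read() == 0) {
    // Each successful Read() leaves the current row in t1's record buffer.
  }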
AccessPath * NewWeedoutAccessPath(THD *thd, AccessPath *child, SJ_TMP_TABLE *weedout_table)
Definition: access_path.h:1633
AccessPath * NewPushedJoinRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool is_unique, bool count_examined_rows)
Definition: access_path.h:1291
AccessPath * NewZeroRowsAccessPath(THD *thd, AccessPath *child, const char *cause)
Definition: access_path.h:1477
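A hedged sketch of the typical use: when optimization proves a subtree can never produce rows, it is replaced by a constant empty result while keeping the old child around (e.g. for EXPLAIN); `subtree` is hypothetical:

  AccessPath *empty =
      NewZeroRowsAccessPath(thd, subtree, /*cause=*/"Impossible WHERE");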
AccessPath * NewUnqualifiedCountAccessPath(THD *thd)
Definition: access_path.h:1380
AccessPath * NewIndexScanAccessPath(THD *thd, TABLE *table, int idx, bool use_order, bool reverse, bool count_examined_rows)
Definition: access_path.h:1243
A wrapper class which provides array bounds checking.
Definition: sql_array.h:46
After parsing, a Common Table Expression is accessed through a Table_ref.
Definition: table.h:4291
API for getting cost estimates for server operations that are not directly related to a table object.
Definition: opt_costmodel.h:51
Sorting related info.
Definition: filesort.h:51
Definition: item_func.h:3417
Base class that is used to represent any kind of expression in a relational query.
Definition: item.h:850
Definition: sql_optimizer.h:125
Definition: key.h:112
A typesafe replacement for DYNAMIC_ARRAY.
Definition: mem_root_array.h:425
Definition: overflow_bitset.h:76
Definition: sql_executor.h:259
Definition: range_optimizer.h:68
This class represents a query expression (one query block or several query blocks combined with UNION...
Definition: sql_lex.h:623
A context for reading through a single table using a chosen access method: index read,...
Definition: row_iterator.h:81
Definition: sql_executor.h:103
For each client connection we create a separate thread with THD serving as a thread/connection descri...
Definition: sql_lexer_thd.h:33
MEM_ROOT * mem_root
Definition: sql_lexer_thd.h:37
Class representing a table function.
Definition: table_function.h:52
Definition: table.h:2755
Object containing parameters used when creating and using temporary tables.
Definition: temp_table_param.h:94
Represents the (explicit) window of a SQL 2003 section 7.11 <window clause>, or the implicit (inlined...
Definition: window.h:104
static MEM_ROOT mem_root
Definition: client_plugin.cc:109
void EstimateLimitOffsetCost(AccessPath *path)
Estimate the costs and row count for a LIMIT_OFFSET AccessPath.
Definition: cost_model.cc:801
bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator, table_map tables_to_get_rowid_for, ha_rows num_rows_estimate, Filesort_info *fs_info, Sort_result *sort_result, ha_rows *found_rows)
Sort a table.
Definition: filesort.cc:366
void SetCostOnTableAccessPath(const Cost_model_server &cost_model, const POSITION *pos, bool is_after_filter, AccessPath *path)
Definition: sql_executor.cc:1852
std::bitset< kMaxSupportedFDs > FunctionalDependencySet
Definition: interesting_orders_defs.h:62
JoinType
Definition: join_type.h:27
static mi_bit_type mask[]
Definition: mi_packrec.cc:140
std::unique_ptr< T, Destroy_only< T > > unique_ptr_destroy_only
Like std::unique_ptr, but it only destroys the object; it does not free the memory, which typically lives on a MEM_ROOT.
Definition: my_alloc.h:488
my_off_t ha_rows
Definition: my_base.h:1139
#define HA_POS_ERROR
Definition: my_base.h:1141
uint64_t table_map
Definition: my_table_map.h:29
static char * path
Definition: mysqldump.cc:133
PT & ref(PT *tp)
Definition: tablespace_impl.cc:358
uint64_t NodeMap
Since our graphs can never have more than 61 tables, node sets and edge lists are implemented using 6...
Definition: node_map.h:39
RangeReverse< Range > reverse(Range &x)
Iterate over a range in reverse.
Definition: utilities.h:131
std::string join(Container cont, const std::string &delim)
Join the elements of a container into a string, separated by a delimiter.
Definition: string.h:150
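For example (hypothetical values):

  // join(std::vector<std::string>{"t1", "t2", "t3"}, ", ") == "t1, t2, t3"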
OverflowBitset is a fixed-size (once allocated) bitmap that is optimized for the common case of few e...
required string key
Definition: replication_asynchronous_connection_failover.proto:59
case opt name
Definition: sslopt-case.h:32
Access paths are a query planning structure that correspond 1:1 to iterators, in that an access path ...
Definition: access_path.h:189
auto & weedout()
Definition: access_path.h:785
auto & filter()
Definition: access_path.h:705
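Accessors such as filter() and weedout() above expose the members of the type-discriminated union inside AccessPath; consumers conventionally switch on `type` first and then call the matching accessor. A minimal sketch (handle_table() and handle_condition() are hypothetical helpers):

  switch (path->type) {
    case AccessPath::TABLE_SCAN:
      handle_table(path->table_scan().table);
      break;
    case AccessPath::FILTER:
      handle_condition(path->filter().condition);
      break;
    default:
      break;
  }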
bool count_all_rows
Definition: access_path.h:1131
AccessPath * bka_path
Definition: access_path.h:901
auto & ref_or_null()
Definition: access_path.h:513
auto & materialized_table_function()
Definition: access_path.h:625
AccessPath * cpk_child
Definition: access_path.h:962
auto & rowid_union()
Definition: access_path.h:593
double cost_before_filter
Definition: access_path.h:391
hypergraph::NodeMap parameter_tables
If nonzero, a bitmap of other tables whose joined-in rows must already be loaded when rows from this ...
Definition: access_path.h:477
const auto & bka_join() const
Definition: access_path.h:685
TABLE * temp_table
Definition: access_path.h:1168
OverflowBitset equijoin_predicates
Definition: access_path.h:1072
Item ** group_items
Definition: access_path.h:1180
double rescan_cost() const
Return the cost of scanning the given path for the second time (or later) in the given query block.
Definition: access_path.h:387
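Presumably this is cost minus init_once_cost (see init_once_cost below), so work done only once per query block is not charged again. A worked example under that assumption:

  // With cost = 110 and init_once_cost = 60 (say, a one-time
  // materialization), each rescan is expected to cost 110 - 60 = 50.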
bool rollup
Definition: access_path.h:1118
auto & materialize()
Definition: access_path.h:753
const auto & temptable_aggregate() const
Definition: access_path.h:733
OverflowBitset & subsumed_sargable_join_predicates()
Similar to applied_sargable_join_predicates, bitmap of sargable join predicates that have been applie...
Definition: access_path.h:449
auto & index_scan()
Definition: access_path.h:497
const auto & delete_rows() const
Definition: access_path.h:829
struct AccessPath::@55::@81 nested_loop_join
const auto & filter() const
Definition: access_path.h:709
auto & delete_rows()
Definition: access_path.h:825
bool reuse_handler
Definition: access_path.h:939
KEY_PART * used_key_part
Definition: access_path.h:913
struct AccessPath::@55::@90 materialize_information_schema_table
struct AccessPath::@55::@71 group_index_skip_scan
OverflowBitset & applied_sargable_join_predicates()
Bitmap of sargable join predicates that have already been applied in this access path by means of an ...
Definition: access_path.h:429
Item * condition
Definition: access_path.h:1090
struct AccessPath::@55::@74 unqualified_count
const auto & index_merge() const
Definition: access_path.h:581
const auto & update_rows() const
Definition: access_path.h:837
enum AccessPath::Type type
auto & alternative()
Definition: access_path.h:809
auto & pushed_join_ref()
Definition: access_path.h:529
AccessPath * outer
Definition: access_path.h:1046
auto & index_skip_scan()
Definition: access_path.h:601
struct AccessPath::@55::@95 remove_duplicates_on_index
bool rewrite_semi_to_inner
Definition: access_path.h:1050
const auto & zero_rows_aggregated() const
Definition: access_path.h:669
auto & group_index_skip_scan()
Definition: access_path.h:609
struct AccessPath::@55::@76 fake_single_row
const auto & eq_ref() const
Definition: access_path.h:525
int group_items_size
Definition: access_path.h:1181
bool use_order
Definition: access_path.h:863
struct AccessPath::@55::@88 stream
bool pfs_batch_mode
Definition: access_path.h:1064
auto & temptable_aggregate()
Definition: access_path.h:729
AccessPath()
Definition: access_path.h:195
GroupIndexSkipScanParameters * param
Definition: access_path.h:1006
auto & rowid_intersection()
Definition: access_path.h:585
bool materialize_subqueries
Definition: access_path.h:1100
AccessPath * child
Definition: access_path.h:1034
bool allow_spill_to_disk
Definition: access_path.h:1048
Index_lookup * used_ref
Definition: access_path.h:1194
struct AccessPath::@55::@78 zero_rows_aggregated
auto & mrr()
Definition: access_path.h:553
struct AccessPath::@55::@70 index_skip_scan
const auto & index_scan() const
Definition: access_path.h:501
const auto & fake_single_row() const
Definition: access_path.h:653
bool reject_multiple_rows
Definition: access_path.h:1132
bool allow_clustered_primary_key_scan
Definition: access_path.h:954
const char * name
Definition: access_path.h:1198
bool use_limit
Definition: access_path.h:891
Window * window
Definition: access_path.h:1167
table_map tables_to_get_rowid_for
Definition: access_path.h:1051
auto & materialize_information_schema_table()
Definition: access_path.h:761
auto & bka_join()
Definition: access_path.h:681
const auto & follow_tail() const
Definition: access_path.h:565
struct AccessPath::@55::@83 filter
SJ_TMP_TABLE * weedout_table
Definition: access_path.h:1175
Index_lookup * ref
Definition: access_path.h:868
const auto & window() const
Definition: access_path.h:781
const auto & materialized_table_function() const
Definition: access_path.h:629
auto & unqualified_count()
Definition: access_path.h:633
bool keep_current_rowid
Definition: access_path.h:903
bool using_extended_key_parts
Definition: access_path.h:949
auto & nested_loop_join()
Definition: access_path.h:689
const auto & full_text_search() const
Definition: access_path.h:541
bool can_be_used_for_ror
Definition: access_path.h:929
float rec_per_key
Definition: access_path.h:1057
Safety safe_for_rowid
Whether it is safe to get row IDs (for sorting) from this access path.
Definition: access_path.h:288
JOIN * join
Definition: access_path.h:1139
struct AccessPath::@55::@69 rowid_union
unsigned index
Definition: access_path.h:925
unsigned mrr_buf_size
Definition: access_path.h:921
struct AccessPath::@55::@60 eq_ref
auto & remove_duplicates()
Definition: access_path.h:793
struct AccessPath::@55::@68 rowid_intersection
struct AccessPath::@55::@61 pushed_join_ref
struct AccessPath::@55::@63 const_table
RowIterator * iterator
If an iterator has been instantiated for this access path, points to the iterator.
Definition: access_path.h:355
const auto & stream() const
Definition: access_path.h:749
double init_cost
Expected cost to initialize this access path; ie., cost to read k out of N rows would be init_cost + ...
Definition: access_path.h:367
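The brief above is truncated; the full header comment appears to give the interpolation init_cost + (k/N) * (cost - init_cost). A worked example, assuming those semantics:

  // With init_cost = 10, cost = 110 and an estimate of N = 1000 rows,
  // reading only k = 100 rows is expected to cost
  //   10 + (100 / 1000) * (110 - 10) = 20.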
const auto & remove_duplicates() const
Definition: access_path.h:797
auto & window()
Definition: access_path.h:777
bool need_rows_in_rowid_order
Definition: access_path.h:933
Table_ref * table_list
Definition: access_path.h:1159
const auto & unqualified_count() const
Definition: access_path.h:637
void * secondary_engine_data
Auxiliary data used by a secondary storage engine while processing the access path during optimizatio...
Definition: access_path.h:486
struct AccessPath::@55::@86 temptable_aggregate
struct AccessPath::@55::@85 aggregate
void set_num_output_rows(double val)
Definition: access_path.h:844
bool is_covering
Definition: access_path.h:982
const auto & mrr() const
Definition: access_path.h:557
bool remove_duplicates
Definition: access_path.h:1112
table_map tables_to_update
Definition: access_path.h:1207
const auto & table_value_constructor() const
Definition: access_path.h:645
KEY * key
Definition: access_path.h:1084
auto & table_value_constructor()
Definition: access_path.h:641
auto & stream()
Definition: access_path.h:745
size_t key_len
Definition: access_path.h:1085
const OverflowBitset & subsumed_sargable_join_predicates() const
Definition: access_path.h:452
const auto & zero_rows() const
Definition: access_path.h:661
const auto & append() const
Definition: access_path.h:773
ha_rows offset
Definition: access_path.h:1130
int idx
Definition: access_path.h:862
struct AccessPath::@55::@87 limit_offset
auto & hash_join()
Definition: access_path.h:673
auto & cache_invalidator()
Definition: access_path.h:817
struct AccessPath::@55::@73 materialized_table_function
bool force_sort_rowids
Definition: access_path.h:1114
unsigned num_used_key_parts
Definition: access_path.h:926
unsigned num_ranges
Definition: access_path.h:918
ORDER * order
Definition: access_path.h:1110
auto & sort()
Definition: access_path.h:713
auto & fake_single_row()
Definition: access_path.h:649
ha_rows limit
Definition: access_path.h:1111
auto & follow_tail()
Definition: access_path.h:561
double m_num_output_rows
Expected number of output rows, -1.0 for unknown.
Definition: access_path.h:848
const JoinPredicate * join_predicate
Definition: access_path.h:1047
const auto & const_table() const
Definition: access_path.h:549
union AccessPath::@55 u
const auto & dynamic_index_range_scan() const
Definition: access_path.h:621
const auto & table_scan() const
Definition: access_path.h:493
unsigned mrr_flags
Definition: access_path.h:920
int ref_slice
Definition: access_path.h:1125
OverflowBitset filter_predicates
Bitmap of WHERE predicates that we are including on this access path, referring to the “predicates” a...
Definition: access_path.h:413
bool can_be_used_for_imerge
Definition: access_path.h:936
bool forced_by_hint
Definition: access_path.h:953
auto & limit_offset()
Definition: access_path.h:737
AccessPath * inner
Definition: access_path.h:1046
bool provide_rowid
Definition: access_path.h:1142
const auto & index_range_scan() const
Definition: access_path.h:573
struct AccessPath::@55::@59 ref_or_null
const auto & ref() const
Definition: access_path.h:509
bool forced_by_dbug
Whether this access path is forced preferred over all others by means of a SET DEBUG force_subplan_0x...
Definition: access_path.h:299
Item_func_match * ft_func
Definition: access_path.h:892
struct AccessPath::@55::@79 hash_join
ha_rows * send_records_override
Definition: access_path.h:1135
struct AccessPath::@55::@77 zero_rows
auto & table_scan()
Definition: access_path.h:489
const auto & hash_join() const
Definition: access_path.h:677
QEP_TAB * qep_tab
Definition: access_path.h:1010
Table_function * table_function
Definition: access_path.h:1014
auto & zero_rows_aggregated()
Definition: access_path.h:665
const auto & sort() const
Definition: access_path.h:717
bool unwrap_rollup
Definition: access_path.h:1113
struct AccessPath::@55::@93 weedout
double init_once_cost
How much of init_cost needs to be done only once per query block.
Definition: access_path.h:379
Type
Definition: access_path.h:204
@ FOLLOW_TAIL
Definition: access_path.h:217
@ FILTER
Definition: access_path.h:241
@ PUSHED_JOIN_REF
Definition: access_path.h:213
@ ZERO_ROWS_AGGREGATED
Definition: access_path.h:230
@ UPDATE_ROWS
Definition: access_path.h:259
@ AGGREGATE
Definition: access_path.h:243
@ BKA_JOIN
Definition: access_path.h:237
@ ZERO_ROWS
Definition: access_path.h:229
@ CONST_TABLE
Definition: access_path.h:215
@ GROUP_INDEX_SKIP_SCAN
Definition: access_path.h:223
@ INDEX_RANGE_SCAN
Definition: access_path.h:218
@ UNQUALIFIED_COUNT
Definition: access_path.h:232
@ EQ_REF
Definition: access_path.h:212
@ FAKE_SINGLE_ROW
Definition: access_path.h:228
@ MATERIALIZE_INFORMATION_SCHEMA_TABLE
Definition: access_path.h:248
@ WINDOW
Definition: access_path.h:250
@ REF_OR_NULL
Definition: access_path.h:211
@ MATERIALIZE
Definition: access_path.h:247
@ NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL
Definition: access_path.h:236
@ ROWID_UNION
Definition: access_path.h:221
@ INDEX_SKIP_SCAN
Definition: access_path.h:222
@ MRR
Definition: access_path.h:216
@ CACHE_INVALIDATOR
Definition: access_path.h:255
@ INDEX_SCAN
Definition: access_path.h:209
@ TABLE_VALUE_CONSTRUCTOR
Definition: access_path.h:227
@ WEEDOUT
Definition: access_path.h:251
@ MATERIALIZED_TABLE_FUNCTION
Definition: access_path.h:231
@ REMOVE_DUPLICATES_ON_INDEX
Definition: access_path.h:253
@ TABLE_SCAN
Definition: access_path.h:208
@ REF
Definition: access_path.h:210
@ TEMPTABLE_AGGREGATE
Definition: access_path.h:244
@ LIMIT_OFFSET
Definition: access_path.h:245
@ APPEND
Definition: access_path.h:249
@ NESTED_LOOP_JOIN
Definition: access_path.h:235
@ INDEX_MERGE
Definition: access_path.h:219
@ FULL_TEXT_SEARCH
Definition: access_path.h:214
@ ALTERNATIVE
Definition: access_path.h:254
@ STREAM
Definition: access_path.h:246
@ REMOVE_DUPLICATES
Definition: access_path.h:252
@ ROWID_INTERSECTION
Definition: access_path.h:220
@ DYNAMIC_INDEX_RANGE_SCAN
Definition: access_path.h:224
@ DELETE_ROWS
Definition: access_path.h:258
@ SORT
Definition: access_path.h:242
@ HASH_JOIN
Definition: access_path.h:238
bool retrieve_full_rows
Definition: access_path.h:965
const auto & rowid_union() const
Definition: access_path.h:597
const TABLE * table
Definition: access_path.h:1083
const auto & index_skip_scan() const
Definition: access_path.h:605
auto & dynamic_index_range_scan()
Definition: access_path.h:617
const auto & pushed_join_ref() const
Definition: access_path.h:533
struct AccessPath::@55::@72 dynamic_index_range_scan
auto & remove_duplicates_on_index()
Definition: access_path.h:801
table_map immediate_tables
Definition: access_path.h:1203
struct AccessPath::@55::@64 mrr
int mrr_flags
Definition: access_path.h:902
auto & full_text_search()
Definition: access_path.h:537
const auto & materialize() const
Definition: access_path.h:757
int ordering_state
Which ordering the rows produced by this path follow, if any (see interesting_orders....
Definition: access_path.h:349
auto & eq_ref()
Definition: access_path.h:521
JoinType join_type
Definition: access_path.h:1055
struct AccessPath::@55::@84 sort
struct AccessPath::@55::@66 index_range_scan
struct AccessPath::@55::@97 cache_invalidator
const auto & limit_offset() const
Definition: access_path.h:741
MaterializePathParameters * param
Definition: access_path.h:1153
const char * cause
Definition: access_path.h:1037
struct AccessPath::@55::@98 delete_rows
auto & update_rows()
Definition: access_path.h:833
auto & index_merge()
Definition: access_path.h:577
auto & aggregate()
Definition: access_path.h:721
struct AccessPath::@55::@62 full_text_search
const auto & group_index_skip_scan() const
Definition: access_path.h:613
auto & zero_rows()
Definition: access_path.h:657
AccessPath * subquery_path
Definition: access_path.h:1121
Mem_root_array< AppendPathParameters > * children
Definition: access_path.h:1163
double cost
Expected cost to read all of this access path once; -1.0 for unknown.
Definition: access_path.h:358
const auto & nested_loop_join() const
Definition: access_path.h:693
struct AccessPath::@55::@96 alternative
bool already_expanded_predicates
Definition: access_path.h:1065
const auto & remove_duplicates_on_index() const
Definition: access_path.h:805
struct AccessPath::@55::@67 index_merge
Temp_table_param * temp_table_param
Definition: access_path.h:1122
auto & nested_loop_semijoin_with_duplicate_removal()
Definition: access_path.h:697
bool reverse
Definition: access_path.h:864
AccessPath * table_scan_path
Definition: access_path.h:1190
const auto & materialize_information_schema_table() const
Definition: access_path.h:765
double subquery_cost
The total cost of executing the queries that we materialize.
Definition: access_path.h:1155
bool geometry
Definition: access_path.h:942
bool count_examined_rows
Whether this access path counts as one that scans a base table, and thus should be counted towards ex...
Definition: access_path.h:294
unsigned loosescan_key_len
Definition: access_path.h:1187
auto & ref()
Definition: access_path.h:505
struct AccessPath::@55::@80 bka_join
struct AccessPath::@55::@82 nested_loop_semijoin_with_duplicate_removal
table_map tables_to_delete_from
Definition: access_path.h:1202
struct AccessPath::@55::@57 index_scan
IndexSkipScanParameters * param
Definition: access_path.h:997
const OverflowBitset & applied_sargable_join_predicates() const
Definition: access_path.h:432
Mem_root_array< AccessPath * > * children
Definition: access_path.h:955
OverflowBitset delayed_predicates
Bitmap of WHERE predicates that touch tables we have joined in, but that we could not apply yet (for ...
Definition: access_path.h:441
AccessPath * table_path
Definition: access_path.h:1015
const auto & aggregate() const
Definition: access_path.h:725
TABLE * table
Definition: access_path.h:858
auto & append()
Definition: access_path.h:769
struct AccessPath::@55::@99 update_rows
const auto & weedout() const
Definition: access_path.h:789
const auto & cache_invalidator() const
Definition: access_path.h:821
bool needs_buffering
Definition: access_path.h:1171
int8_t immediate_update_delete_table
For UPDATE and DELETE statements: The node index of a table which can be updated or deleted from imme...
Definition: access_path.h:343
QUICK_RANGE ** ranges
Definition: access_path.h:917
bool is_unique
Definition: access_path.h:885
const auto & alternative() const
Definition: access_path.h:813
Safety
A general enum to describe the safety of a given operation.
Definition: access_path.h:269
@ SAFE_IF_SCANNED_ONCE
The given operation is safe if this access path is scanned once, but not if it's scanned multiple tim...
Definition: access_path.h:278
@ UNSAFE
The given operation is unsafe on this access path, no matter how many or few times it's scanned.
Definition: access_path.h:284
@ SAFE
The given operation is always safe on this access path.
Definition: access_path.h:271
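A hedged sketch of how a consumer might act on safe_for_rowid (the `will_scan_exactly_once` flag and the fallback are hypothetical):

  bool can_use_rowids =
      path->safe_for_rowid == AccessPath::SAFE ||
      (path->safe_for_rowid == AccessPath::SAFE_IF_SCANNED_ONCE &&
       will_scan_exactly_once);
  if (!can_use_rowids) {
    // fall back to sorting complete rows instead of row IDs
  }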
auto & const_table()
Definition: access_path.h:545
struct AccessPath::@55::@75 table_value_constructor
unsigned mrr_length_per_rec
Definition: access_path.h:1056
struct AccessPath::@55::@65 follow_tail
const auto & rowid_intersection() const
Definition: access_path.h:589
auto & index_range_scan()
Definition: access_path.h:569
double num_output_rows() const
Definition: access_path.h:842
struct AccessPath::@55::@89 materialize
double num_output_rows_before_filter
If there is no filter, num_output_rows_before_filter and cost_before_filter are identical to num_output_rows and cost, respectively.
Definition: access_path.h:391
const auto & ref_or_null() const
Definition: access_path.h:517
struct AccessPath::@55::@91 append
Filesort * filesort
Definition: access_path.h:1104
bool store_rowids
Definition: access_path.h:1049
const auto & nested_loop_semijoin_with_duplicate_removal() const
Definition: access_path.h:701
struct AccessPath::@55::@56 table_scan
Definition: access_path.h:162
JOIN * join
Definition: access_path.h:164
AccessPath * path
Definition: access_path.h:163
Definition: group_index_skip_scan_plan.h:44
Logically a part of AccessPath::index_skip_scan(), but is too large, so split out into its own struct...
Definition: index_skip_scan_plan.h:72
Structure used for index-based lookups.
Definition: sql_opt_exec_shared.h:66
A specification that two specific relational expressions (e.g., two tables, or a table and a join bet...
Definition: access_path.h:75
int semijoin_group_size
Definition: access_path.h:108
FunctionalDependencySet functional_dependencies
Definition: access_path.h:92
Mem_root_array< int > functional_dependencies_idx
Definition: access_path.h:97
RelationalExpression * expr
Definition: access_path.h:76
double selectivity
Definition: access_path.h:77
int ordering_idx_needed_for_semijoin_rewrite
Definition: access_path.h:102
Item ** semijoin_group
Definition: access_path.h:107
size_t estimated_bytes_per_row
Definition: access_path.h:81
Definition: range_optimizer.h:54
The MEM_ROOT is a simple arena, where allocations are carved out of larger blocks.
Definition: my_alloc.h:82
Definition: materialize_path_parameters.h:41
int select_number
Definition: materialize_path_parameters.h:43
bool copy_items
Definition: materialize_path_parameters.h:46
Temp_table_param * temp_table_param
Definition: materialize_path_parameters.h:47
AccessPath * subquery_path
Definition: materialize_path_parameters.h:42
bool disable_deduplication_by_hash_field
Definition: materialize_path_parameters.h:45
JOIN * join
Definition: materialize_path_parameters.h:44
Definition: materialize_path_parameters.h:39
bool rematerialize
True if rematerializing on every Init() call (e.g., because we have a dependency on a value from outs...
Definition: materialize_path_parameters.h:79
Common_table_expr * cte
If materializing a CTE, points to it (see m_cte), otherwise nullptr.
Definition: materialize_path_parameters.h:64
bool reject_multiple_rows
True if this is the top level iterator for a materialized derived table transformed from a scalar sub...
Definition: materialize_path_parameters.h:98
ha_rows limit_rows
Used for when pushing LIMIT down to MaterializeIterator; this is more efficient than having a LimitOf...
Definition: materialize_path_parameters.h:91
Mem_root_array< QueryBlock > query_blocks
Definition: materialize_path_parameters.h:57
TABLE * table
Handle to table to materialize into.
Definition: materialize_path_parameters.h:61
int ref_slice
Definition: materialize_path_parameters.h:73
Query_expression * unit
The query expression we are materializing.
Definition: materialize_path_parameters.h:67
Mem_root_array< const AccessPath * > * invalidators
Definition: materialize_path_parameters.h:58
Definition: table.h:280
The position of a table within a join order.
Definition: sql_select.h:351
A filter of some sort that is not a join condition (those are stored in JoinPredicate objects).
Definition: access_path.h:116
hypergraph::NodeMap total_eligibility_set
Definition: access_path.h:129
bool was_join_condition
Definition: access_path.h:142
Mem_root_array< int > functional_dependencies_idx
Definition: access_path.h:154
FunctionalDependencySet functional_dependencies
Definition: access_path.h:153
int source_multiple_equality_idx
Definition: access_path.h:150
hypergraph::NodeMap used_nodes
Definition: access_path.h:120
Item * condition
Definition: access_path.h:117
double selectivity
Definition: access_path.h:131
Mem_root_array< ContainedSubquery > contained_subqueries
Definition: access_path.h:159
Represents an expression tree in the relational algebra of joins.
Definition: relational_expression.h:80
Definition: table.h:1395
bool is_union_or_table() const
Test if this tmp table stores the result of a UNION set operation or a single table.
Definition: table.h:1547