MySQL 8.0.29
Source Code Documentation
access_path.h
1/* Copyright (c) 2020, 2021, Oracle and/or its affiliates.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License, version 2.0,
5 as published by the Free Software Foundation.
6
7 This program is also distributed with certain software (including
8 but not limited to OpenSSL) that is licensed under separate terms,
9 as designated in a particular file or component or in included license
10 documentation. The authors of MySQL hereby grant you an additional
11 permission to link the program and your derivative works with the
12 separately licensed software that they have included with MySQL.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License, version 2.0, for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
22
23#ifndef SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
24#define SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
25
26#include <assert.h>
27#include <stdint.h>
28
29#include <string>
30#include <type_traits>
31#include <vector>
32
39#include "sql/join_type.h"
40#include "sql/mem_root_array.h"
41#include "sql/sql_array.h"
42#include "sql/sql_class.h"
43
44template <class T>
45class Bounds_checked_array;
46class Common_table_expr;
47class Filesort;
48class Item;
49class Item_func_match;
50class JOIN;
51class KEY;
52class QEP_TAB;
53class QUICK_RANGE;
54class SJ_TMP_TABLE;
55class Table_function;
56class Temp_table_param;
57class Window;
58struct AccessPath;
59struct GroupIndexSkipScanParameters;
60struct IndexSkipScanParameters;
61struct KEY_PART;
62struct ORDER;
63struct POSITION;
64struct RelationalExpression;
65struct TABLE;
66struct TABLE_REF;
67
68/**
69 A specification that two specific relational expressions
70 (e.g., two tables, or a table and a join between two other tables)
71 should be joined together. The actual join conditions, if any,
72 live inside the “expr” object, as does the join type etc.
73 */
74struct JoinPredicate {
75 RelationalExpression *expr;
76 double selectivity;
77
78 // If this join is made using a hash join, estimates the width
79 // of each row as stored in the hash table, in bytes.
80 size_t estimated_bytes_per_row;
81
82 // The set of (additional) functional dependencies that are active
83 // after this join predicate has been applied. E.g. if we're joining
84 // on t1.x = t2.x, there will be a bit for that functional dependency.
85 // We don't currently support more complex join conditions, but there's
86 // no conceptual reason why we couldn't, e.g. a join on a = b + c
87 // could give rise to the FD {b, c} → a and possibly even {a, b} → c
88 // or {a, c} → b.
89 //
90 // Used in the processing of interesting orders.
91 FunctionalDependencySet functional_dependencies;
92
93 // A less compact form of functional_dependencies, used during building
94 // (FunctionalDependencySet bitmaps are only available after all functional
95 // indexes have been collected and Build() has been called).
96 Mem_root_array<int> functional_dependencies_idx;
97
98 // If this is a suitable semijoin: Contains the grouping given by the
99 // join key. If the rows are in this grouping, then the join optimizer will
100 // consider deduplicating on it and inverting the join. -1 otherwise.
101 int ordering_idx_needed_for_semijoin_rewrite = -1;
102
103 // Same as ordering_idx_needed_for_semijoin_rewrite, but given to the
104 // RemoveDuplicatesIterator for doing the actual grouping. Allocated
105 // on the MEM_ROOT. Can be empty, in which case a LIMIT 1 would do.
106 Item **semijoin_group = nullptr;
107 int semijoin_group_size = 0;
108};
109
110/**
111 A filter of some sort that is not a join condition (those are stored
112 in JoinPredicate objects). AND conditions are typically split up into
113 multiple Predicates.
114 */
115struct Predicate {
116 Item *condition;
117
118 // condition->used_tables(), converted to a NodeMap.
119 hypergraph::NodeMap used_tables;
120
121 // tables referred to by the condition, plus any tables whose values
122 // can null any of those tables. (Even when reordering outer joins,
123 // at least one of those tables will still be present on the
124 // left-hand side of the outer join, so this is sufficient.)
125 //
126 // As a special case, we allow setting RAND_TABLE_BIT, even though it
127 // is normally part of a table_map, not a NodeMap.
128 hypergraph::NodeMap total_eligibility_set;
129
130 double selectivity;
131
132 // Whether this predicate is a join condition after all; it was promoted
133 // to a WHERE predicate since it was part of a cycle (see the comment in
134 // AddCycleEdges()). If it is, it is usually ignored so that we don't
135 // double-apply join conditions -- but if the join in question was not
136 // applied (because the cycle was broken at this point), the predicate
137 // would come into play. This is normally registered on the join itself
138 // (see RelationalExpression::join_predicate_bitmap), but having the bit
139 // on the predicate itself is used to avoid trying to push it down as a
140 // sargable predicate.
141 bool was_join_condition = false;
142
143 // If this is a join condition that came from a multiple equality,
144 // and we have decided to create a mesh from that multiple equality,
145 // returns the index of it into the “multiple_equalities” array
146 // in MakeJoinHypergraph(). (You don't actually need the array to
147 // use this; it's just an opaque index to deduplicate between different
148 // predicates.) Otherwise, -1.
149 int source_multiple_equality_idx = -1;
150
151 // See the equivalent fields in JoinPredicate.
152 FunctionalDependencySet functional_dependencies;
153 Mem_root_array<int> functional_dependencies_idx;
154
155 // The list of all subqueries referred to in this predicate, if any.
156 // The optimizer uses this to add their materialized/non-materialized
157 // costs when evaluating filters.
159};
160
161struct AppendPathParameters {
162 AccessPath *path;
163 JOIN *join;
164};
165
166/**
167 Access paths are a query planning structure that correspond 1:1 to iterators,
168 in that an access path contains pretty much exactly the information
169 needed to instantiate the given iterator, plus some information that is only
170 needed during planning, such as costs. (The new join optimizer will extend
171 this somewhat in the future. Some iterators also need the query block,
172 ie., JOIN object, they are part of, but that is implicitly available when
173 constructing the tree.)
174
175 AccessPath objects build on a variant, ie., they can hold an access path of
176 any type (table scan, filter, hash join, sort, etc.), although only one at the
177 same time. Currently, they contain 32 bytes of base information that is common
178 to any access path (type identifier, costs, etc.), and then up to 40 bytes
179 that is type-specific (e.g. for a table scan, the TABLE object). It would be
180 nice if we could squeeze it down to 64 and fit a cache line exactly, but it
181 does not seem to be easy without fairly large contortions.
182
183 We could have solved this by inheritance, but the fixed-size design makes it
184 possible to replace an access path when a better one is found, without
185 introducing a new allocation, which will be important when using them as a
186 planning structure.
187 */
188struct AccessPath {
189 // Most of the members are declared with initializers, but bit fields cannot
190 // be declared with initializers until C++20, so we need to define a
191 // constructor to initialize those fields.
192 // TODO(khatlen): When we move to C++20, add initializers to the bit fields
193 // too, and use the default constructor generated by the compiler.
194 AccessPath()
195 : count_examined_rows(false)
196#ifndef NDEBUG
197 ,
198 forced_by_dbug(false)
199#endif
200 {
201 }
202
203 enum Type : uint8_t {
204 // Basic access paths (those with no children, at least nominally).
205 TABLE_SCAN,
206 INDEX_SCAN,
207 REF,
208 REF_OR_NULL,
209 EQ_REF,
210 PUSHED_JOIN_REF,
211 FULL_TEXT_SEARCH,
212 CONST_TABLE,
213 MRR,
214 FOLLOW_TAIL,
215 INDEX_RANGE_SCAN,
216 INDEX_MERGE,
217 ROWID_INTERSECTION,
218 ROWID_UNION,
219 INDEX_SKIP_SCAN,
220 GROUP_INDEX_SKIP_SCAN,
221 DYNAMIC_INDEX_RANGE_SCAN,
222
223 // Basic access paths that don't correspond to a specific table.
224 TABLE_VALUE_CONSTRUCTOR,
225 FAKE_SINGLE_ROW,
226 ZERO_ROWS,
227 ZERO_ROWS_AGGREGATED,
228 MATERIALIZED_TABLE_FUNCTION,
229 UNQUALIFIED_COUNT,
230
231 // Joins.
232 HASH_JOIN,
233 BKA_JOIN,
234 NESTED_LOOP_JOIN,
235 NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL,
236
237 // Composite access paths.
238 FILTER,
239 SORT,
240 AGGREGATE,
241 TEMPTABLE_AGGREGATE,
242 LIMIT_OFFSET,
243 STREAM,
244 MATERIALIZE,
245 MATERIALIZE_INFORMATION_SCHEMA_TABLE,
246 APPEND,
247 WINDOW,
248 WEEDOUT,
249 REMOVE_DUPLICATES,
250 REMOVE_DUPLICATES_ON_INDEX,
251 ALTERNATIVE,
252 CACHE_INVALIDATOR,
253
254 // Access paths that modify tables.
255 DELETE_ROWS,
256 } type;
257
258 /// A general enum to describe the safety of a given operation.
259 /// Currently we only use this to describe row IDs, but it can easily
260 /// be reused for safety of updating a table we're reading from
261 /// (the Halloween problem), or just generally unreproducible results
262 /// (e.g. a TABLESAMPLE changing due to external factors).
263 ///
264 /// Less safe values have higher numerical values.
265 enum Safety : uint8_t {
266 /// The given operation is always safe on this access path.
267 SAFE = 0,
268
269 /// The given operation is safe if this access path is scanned once,
270 /// but not if it's scanned multiple times (e.g. used on the inner side
271 /// of a nested-loop join). A typical example of this is a derived table
272 /// or CTE that is rematerialized on each scan, so that references to
273 /// the old values (such as row IDs) are no longer valid.
274 SAFE_IF_SCANNED_ONCE = 1,
275
276 /// The given operation is unsafe on this access path, no matter how many
277 /// or few times it's scanned. Often, it may help to materialize it
278 /// (assuming the materialization itself doesn't use the operation
279 /// in question).
280 UNSAFE = 2
281 };
282
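 // Example (sketch, not part of the original header): rematerializable
 // materializations are the canonical SAFE_IF_SCANNED_ONCE case. See
 // NewMaterializeAccessPath() further down, which effectively does
 //
 //   if (rematerialize) path->safe_for_rowid = AccessPath::SAFE_IF_SCANNED_ONCE;
 //
 // because row IDs handed out during one scan point into contents that are
 // thrown away by the next rematerialization.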
283 /// Whether it is safe to get row IDs (for sorting) from this access path.
284 Safety safe_for_rowid = SAFE;
285
286 /// Whether this access path counts as one that scans a base table,
287 /// and thus should be counted towards examined_rows. It can sometimes
288 /// seem a bit arbitrary which iterators count towards examined_rows
289 /// and which ones do not, so the only canonical reference is the tests.
290 bool count_examined_rows : 1;
291
292#ifndef NDEBUG
293 /// Whether this access path is forced preferred over all others by means
294 /// of a SET DEBUG force_subplan_0x... statement.
295 bool forced_by_dbug : 1;
296#endif
297
298 /// For UPDATE and DELETE statements: The node index of a table which can be
299 /// updated or deleted from immediately as the rows are read from the
300 /// iterator, if this path is only read from once. -1 if there is no such
301 /// table in this path.
302 ///
303 /// Note that this is an index into CostingReceiver's array of nodes, and is
304 /// not necessarily equal to the table number within the query block given by
305 /// TABLE_LIST::tableno().
306 ///
307 /// The table, if any, is currently always the outermost table in the path.
308 ///
309 /// It is possible to have plans where it would be safe to operate
310 /// "immediately" on more than one table. For example, if we do a merge join,
311 /// it is safe to perform immediate deletes on tables on the inner side of the
312 /// join, since both sides are read only once. (However, we currently do not
313 /// support merge joins.)
314 ///
315 /// Another possibility is when the outer table of a nested loop join is
316 /// guaranteed to return at most one row (typically, a unique index lookup
317 /// aka. eq_ref). Then it's safe to delete immediately from both sides of the
318 /// nested loop join. But we don't do this yet.
319 ///
320 /// Hash joins read both sides exactly once. However, with hash joins, the
321 /// scans on the inner tables are not positioned on the correct row when the
322 /// result of the join is returned, so the immediate delete logic will need to
323 /// be changed to reposition the underlying scans before doing the immediate
324 /// deletes. While this can be done, it makes the benefit of immediate deletes
325 /// less obvious for these tables, and it can also be a loss in some cases,
326 /// because we lose the deduplication provided by the Unique object used for
327 /// buffered deletes (the immediate deletes could end up spending time
328 /// repositioning to already deleted rows). So we currently don't attempt to
329 /// do immediate deletes from inner tables of hash joins either.
330 ///
331 /// The outer table of a hash join can be deleted from immediately if the
332 /// inner table fits in memory. If the hash join spills to disk, though,
333 /// neither the rows of the outer table nor the rows of the inner table come
334 /// out in the order of the underlying scan, so it is not safe in general to
335 /// perform immediate deletes on the outer table of a hash join.
336 ///
337 /// If support for immediate operations on multiple tables is added,
338 /// this member could be changed from a node index to a NodeMap.
339 int8_t immediate_update_delete_table{-1};
340
341 /// Which ordering the rows produced by this path follow, if any
342 /// (see interesting_orders.h). This is really a LogicalOrderings::StateIndex,
343 /// but we don't want to add a dependency on interesting_orders.h from
344 /// this file, so we use the base type instead of the typedef here.
345 int ordering_state = 0;
346
347 /// If an iterator has been instantiated for this access path, points to the
348 /// iterator. Used for constructing iterators that need to talk to each other
349 /// (e.g. for recursive CTEs, or BKA join), and also for locating timing
350 /// information in EXPLAIN ANALYZE queries.
351 RowIterator *iterator = nullptr;
352
353 /// Expected number of output rows, -1.0 for unknown.
354 double num_output_rows{-1.0};
355
356 /// Expected cost to read all of this access path once; -1.0 for unknown.
357 double cost{-1.0};
358
359 /// Expected cost to initialize this access path; ie., cost to read
360 /// k out of N rows would be init_cost + (k/N) * (cost - init_cost).
361 /// Note that EXPLAIN prints out cost of reading the _first_ row
362 /// because it is easier for the user and also easier to measure in
363 /// EXPLAIN ANALYZE, but it is easier to do calculations with a pure
364 /// initialization cost, so that is what we use in this member.
365 /// -1.0 for unknown.
366 double init_cost{-1.0};
367
368 /// Of init_cost, how much of the initialization needs only to be done
369 /// once per query block. (This is a cost, not a proportion.)
370 /// Ie., if the access path can reuse some of its initialization work
371 /// if Init() is called multiple times, this member will be nonzero.
372 /// A typical example is a materialized table with rematerialize=false;
373 /// the second time Init() is called, it's a no-op. Most paths will have
374 /// init_once_cost = 0.0, ie., repeated scans will cost the same.
375 /// We do not intend to use this field to model cache effects.
376 ///
377 /// This is currently not printed in EXPLAIN, only optimizer trace.
378 double init_once_cost{0.0};
379
380 /// Return the cost of scanning the given path for the second time
381 /// (or later) in the given query block. This is really the interesting
382 /// metric, not init_once_cost in itself, but since nearly all paths
383 /// have zero init_once_cost, storing that instead allows us to skip
384 /// a lot of repeated path->init_once_cost = path->init_cost calls
385 /// in the code.
386 double rescan_cost() const { return cost - init_once_cost; }
387
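 // Worked example (illustrative numbers only): with cost = 110.0,
 // init_cost = 10.0 and init_once_cost = 6.0, reading k = 25 out of
 // N = 100 rows is estimated at 10.0 + (25/100) * (110.0 - 10.0) = 35.0,
 // and every scan after the first is estimated at
 // rescan_cost() = 110.0 - 6.0 = 104.0.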
388 /// If no filter, identical to num_output_rows, cost, respectively.
389 /// init_cost is always the same (filters have zero initialization cost).
390 double num_output_rows_before_filter{-1.0}, cost_before_filter{-1.0};
391
392 /// Bitmap of WHERE predicates that we are including on this access path,
393 /// referring to the “predicates” array internal to the join optimizer.
394 /// Since bit masks are much cheaper to deal with than creating Item
395 /// objects, and we don't invent new conditions during join optimization
396 /// (all of them are known when we begin optimization), we stick to
397 /// manipulating bit masks during optimization, saying which filters will be
398 /// applied at this node (a 1-bit means the filter will be applied here; if
399 /// there are multiple ones, they are ANDed together).
400 ///
401 /// This is used during join optimization only; before iterators are
402 /// created, we will add FILTER access paths to represent these instead,
403 /// removing the dependency on the array. Said FILTER paths are by
404 /// convention created with materialize_subqueries = false, since the by far
405 /// most common case is that there are no subqueries in the predicate.
406 /// In other words, if you wish to represent a filter with
407 /// materialize_subqueries = true, you will need to make an explicit FILTER
408 /// node.
409 ///
410 /// See also nested_loop_join().equijoin_predicates, which is for filters
411 /// being applied _before_ nested-loop joins, but is otherwise the same idea.
412 OverflowBitset filter_predicates{0};
413
414 /// Bitmap of sargable join predicates that have already been applied
415 /// in this access path by means of an index lookup (ref access),
416 /// again referring to “predicates”, and thus should not be counted again
417 /// for selectivity. Note that the filter may need to be applied
418 /// nevertheless (especially in case of type conversions); see
419 /// subsumed_sargable_join_predicates.
420 ///
421 /// Since these refer to the same array as filter_predicates, they will
422 /// never overlap with filter_predicates, and so we can reuse the same
423 /// memory using an alias (a union would not be allowed, since OverflowBitset
424 /// is a class with non-trivial default constructor), even though the meaning
425 /// is entirely separate. If N = num_where_predicates in the hypergraph, then
426 /// bits 0..(N-1) belong to filter_predicates, and the rest to
427 /// applied_sargable_join_predicates.
428 OverflowBitset &applied_sargable_join_predicates() {
429 return filter_predicates;
430 }
431 const OverflowBitset &applied_sargable_join_predicates() const {
432 return filter_predicates;
433 }
434
435 /// Bitmap of WHERE predicates that touch tables we have joined in,
436 /// but that we could not apply yet (for instance because they reference
437 /// other tables, or because we could not push them down into
438 /// the nullable side of outer joins). Used during planning only
439 /// (see filter_predicates).
440 OverflowBitset delayed_predicates{0};
441
442 /// Similar to applied_sargable_join_predicates, bitmap of sargable
443 /// join predicates that have been applied and will subsume the join
444 /// predicate entirely, ie., not only should the selectivity not be
445 /// double-counted, but the predicate itself is redundant and need not
446 /// be applied as a filter. (It is an error to have a bit set here but not
447 /// in applied_sargable_join_predicates.)
448 OverflowBitset &subsumed_sargable_join_predicates() {
449 return delayed_predicates;
450 }
451 const OverflowBitset &subsumed_sargable_join_predicates() const {
452 return delayed_predicates;
453 }
454
455 /// If nonzero, a bitmap of other tables whose joined-in rows must already be
456 /// loaded when rows from this access path are evaluated; that is, this
457 /// access path must be put on the inner side of a nested-loop join (or
458 /// multiple such joins) where the outer side includes all of the given
459 /// tables.
460 ///
461 /// The most obvious case for this is dependent tables in LATERAL, but a more
462 /// common case is when we have pushed join conditions referring to those
463 /// tables; e.g., if this access path represents t1 and we have a condition
464 /// t1.x=t2.x that is pushed down into an index lookup (ref access), t2 will
465 /// be set in this bitmap. We can still join in other tables, deferring t2,
466 /// but the bit(s) will then propagate, and we cannot be on the right side of
467 /// a hash join until parameter_tables is zero again. (Also see
468 /// DisallowParameterizedJoinPath() for when we disallow such deferring,
469 /// as an optimization.)
470 ///
471 /// As a special case, we allow setting RAND_TABLE_BIT, even though it
472 /// is normally part of a table_map, not a NodeMap. In this case, it specifies
473 /// that the access path is entirely noncachable, because it depends on
474 /// something nondeterministic or an outer reference, and thus can never be on
475 /// the right side of a hash join, ever.
476 hypergraph::NodeMap parameter_tables{0};
477
478 /// Auxiliary data used by a secondary storage engine while processing the
479 /// access path during optimization and execution. The secondary storage
480 /// engine is free to store any useful information in this member, for example
481 /// extra statistics or cost estimates. The data pointed to is fully owned by
482 /// the secondary storage engine, and it is the responsibility of the
483 /// secondary engine to manage the memory and make sure it is properly
484 /// destroyed.
485 void *secondary_engine_data{nullptr};
486
487 // Accessors for the union below.
488 auto &table_scan() {
489 assert(type == TABLE_SCAN);
490 return u.table_scan;
491 }
492 const auto &table_scan() const {
493 assert(type == TABLE_SCAN);
494 return u.table_scan;
495 }
496 auto &index_scan() {
497 assert(type == INDEX_SCAN);
498 return u.index_scan;
499 }
500 const auto &index_scan() const {
501 assert(type == INDEX_SCAN);
502 return u.index_scan;
503 }
504 auto &ref() {
505 assert(type == REF);
506 return u.ref;
507 }
508 const auto &ref() const {
509 assert(type == REF);
510 return u.ref;
511 }
512 auto &ref_or_null() {
513 assert(type == REF_OR_NULL);
514 return u.ref_or_null;
515 }
516 const auto &ref_or_null() const {
517 assert(type == REF_OR_NULL);
518 return u.ref_or_null;
519 }
520 auto &eq_ref() {
521 assert(type == EQ_REF);
522 return u.eq_ref;
523 }
524 const auto &eq_ref() const {
525 assert(type == EQ_REF);
526 return u.eq_ref;
527 }
528 auto &pushed_join_ref() {
529 assert(type == PUSHED_JOIN_REF);
530 return u.pushed_join_ref;
531 }
532 const auto &pushed_join_ref() const {
533 assert(type == PUSHED_JOIN_REF);
534 return u.pushed_join_ref;
535 }
536 auto &full_text_search() {
537 assert(type == FULL_TEXT_SEARCH);
538 return u.full_text_search;
539 }
540 const auto &full_text_search() const {
541 assert(type == FULL_TEXT_SEARCH);
542 return u.full_text_search;
543 }
544 auto &const_table() {
545 assert(type == CONST_TABLE);
546 return u.const_table;
547 }
548 const auto &const_table() const {
549 assert(type == CONST_TABLE);
550 return u.const_table;
551 }
552 auto &mrr() {
553 assert(type == MRR);
554 return u.mrr;
555 }
556 const auto &mrr() const {
557 assert(type == MRR);
558 return u.mrr;
559 }
560 auto &follow_tail() {
561 assert(type == FOLLOW_TAIL);
562 return u.follow_tail;
563 }
564 const auto &follow_tail() const {
565 assert(type == FOLLOW_TAIL);
566 return u.follow_tail;
567 }
568 auto &index_range_scan() {
569 assert(type == INDEX_RANGE_SCAN);
570 return u.index_range_scan;
571 }
572 const auto &index_range_scan() const {
573 assert(type == INDEX_RANGE_SCAN);
574 return u.index_range_scan;
575 }
576 auto &index_merge() {
577 assert(type == INDEX_MERGE);
578 return u.index_merge;
579 }
580 const auto &index_merge() const {
581 assert(type == INDEX_MERGE);
582 return u.index_merge;
583 }
584 auto &rowid_intersection() {
585 assert(type == ROWID_INTERSECTION);
586 return u.rowid_intersection;
587 }
588 const auto &rowid_intersection() const {
589 assert(type == ROWID_INTERSECTION);
590 return u.rowid_intersection;
591 }
592 auto &rowid_union() {
593 assert(type == ROWID_UNION);
594 return u.rowid_union;
595 }
596 const auto &rowid_union() const {
597 assert(type == ROWID_UNION);
598 return u.rowid_union;
599 }
600 auto &index_skip_scan() {
601 assert(type == INDEX_SKIP_SCAN);
602 return u.index_skip_scan;
603 }
604 const auto &index_skip_scan() const {
605 assert(type == INDEX_SKIP_SCAN);
606 return u.index_skip_scan;
607 }
608 auto &group_index_skip_scan() {
609 assert(type == GROUP_INDEX_SKIP_SCAN);
610 return u.group_index_skip_scan;
611 }
612 const auto &group_index_skip_scan() const {
613 assert(type == GROUP_INDEX_SKIP_SCAN);
614 return u.group_index_skip_scan;
615 }
616 auto &dynamic_index_range_scan() {
617 assert(type == DYNAMIC_INDEX_RANGE_SCAN);
618 return u.dynamic_index_range_scan;
619 }
620 const auto &dynamic_index_range_scan() const {
621 assert(type == DYNAMIC_INDEX_RANGE_SCAN);
622 return u.dynamic_index_range_scan;
623 }
624 auto &materialized_table_function() {
625 assert(type == MATERIALIZED_TABLE_FUNCTION);
626 return u.materialized_table_function;
627 }
628 const auto &materialized_table_function() const {
629 assert(type == MATERIALIZED_TABLE_FUNCTION);
630 return u.materialized_table_function;
631 }
632 auto &unqualified_count() {
633 assert(type == UNQUALIFIED_COUNT);
634 return u.unqualified_count;
635 }
636 const auto &unqualified_count() const {
637 assert(type == UNQUALIFIED_COUNT);
638 return u.unqualified_count;
639 }
640 auto &table_value_constructor() {
641 assert(type == TABLE_VALUE_CONSTRUCTOR);
642 return u.table_value_constructor;
643 }
644 const auto &table_value_constructor() const {
645 assert(type == TABLE_VALUE_CONSTRUCTOR);
646 return u.table_value_constructor;
647 }
648 auto &fake_single_row() {
649 assert(type == FAKE_SINGLE_ROW);
650 return u.fake_single_row;
651 }
652 const auto &fake_single_row() const {
653 assert(type == FAKE_SINGLE_ROW);
654 return u.fake_single_row;
655 }
656 auto &zero_rows() {
657 assert(type == ZERO_ROWS);
658 return u.zero_rows;
659 }
660 const auto &zero_rows() const {
661 assert(type == ZERO_ROWS);
662 return u.zero_rows;
663 }
664 auto &zero_rows_aggregated() {
665 assert(type == ZERO_ROWS_AGGREGATED);
666 return u.zero_rows_aggregated;
667 }
668 const auto &zero_rows_aggregated() const {
669 assert(type == ZERO_ROWS_AGGREGATED);
670 return u.zero_rows_aggregated;
671 }
672 auto &hash_join() {
673 assert(type == HASH_JOIN);
674 return u.hash_join;
675 }
676 const auto &hash_join() const {
677 assert(type == HASH_JOIN);
678 return u.hash_join;
679 }
680 auto &bka_join() {
681 assert(type == BKA_JOIN);
682 return u.bka_join;
683 }
684 const auto &bka_join() const {
685 assert(type == BKA_JOIN);
686 return u.bka_join;
687 }
688 auto &nested_loop_join() {
689 assert(type == NESTED_LOOP_JOIN);
690 return u.nested_loop_join;
691 }
692 const auto &nested_loop_join() const {
693 assert(type == NESTED_LOOP_JOIN);
694 return u.nested_loop_join;
695 }
696 auto &nested_loop_semijoin_with_duplicate_removal() {
697 assert(type == NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL);
698 return u.nested_loop_semijoin_with_duplicate_removal;
699 }
700 const auto &nested_loop_semijoin_with_duplicate_removal() const {
701 assert(type == NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL);
702 return u.nested_loop_semijoin_with_duplicate_removal;
703 }
704 auto &filter() {
705 assert(type == FILTER);
706 return u.filter;
707 }
708 const auto &filter() const {
709 assert(type == FILTER);
710 return u.filter;
711 }
712 auto &sort() {
713 assert(type == SORT);
714 return u.sort;
715 }
716 const auto &sort() const {
717 assert(type == SORT);
718 return u.sort;
719 }
720 auto &aggregate() {
721 assert(type == AGGREGATE);
722 return u.aggregate;
723 }
724 const auto &aggregate() const {
725 assert(type == AGGREGATE);
726 return u.aggregate;
727 }
728 auto &temptable_aggregate() {
729 assert(type == TEMPTABLE_AGGREGATE);
730 return u.temptable_aggregate;
731 }
732 const auto &temptable_aggregate() const {
733 assert(type == TEMPTABLE_AGGREGATE);
734 return u.temptable_aggregate;
735 }
736 auto &limit_offset() {
737 assert(type == LIMIT_OFFSET);
738 return u.limit_offset;
739 }
740 const auto &limit_offset() const {
741 assert(type == LIMIT_OFFSET);
742 return u.limit_offset;
743 }
744 auto &stream() {
745 assert(type == STREAM);
746 return u.stream;
747 }
748 const auto &stream() const {
749 assert(type == STREAM);
750 return u.stream;
751 }
752 auto &materialize() {
753 assert(type == MATERIALIZE);
754 return u.materialize;
755 }
756 const auto &materialize() const {
757 assert(type == MATERIALIZE);
758 return u.materialize;
759 }
760 auto &materialize_information_schema_table() {
761 assert(type == MATERIALIZE_INFORMATION_SCHEMA_TABLE);
762 return u.materialize_information_schema_table;
763 }
764 const auto &materialize_information_schema_table() const {
765 assert(type == MATERIALIZE_INFORMATION_SCHEMA_TABLE);
766 return u.materialize_information_schema_table;
767 }
768 auto &append() {
769 assert(type == APPEND);
770 return u.append;
771 }
772 const auto &append() const {
773 assert(type == APPEND);
774 return u.append;
775 }
776 auto &window() {
777 assert(type == WINDOW);
778 return u.window;
779 }
780 const auto &window() const {
781 assert(type == WINDOW);
782 return u.window;
783 }
784 auto &weedout() {
785 assert(type == WEEDOUT);
786 return u.weedout;
787 }
788 const auto &weedout() const {
789 assert(type == WEEDOUT);
790 return u.weedout;
791 }
792 auto &remove_duplicates() {
793 assert(type == REMOVE_DUPLICATES);
794 return u.remove_duplicates;
795 }
796 const auto &remove_duplicates() const {
797 assert(type == REMOVE_DUPLICATES);
798 return u.remove_duplicates;
799 }
800 auto &remove_duplicates_on_index() {
801 assert(type == REMOVE_DUPLICATES_ON_INDEX);
802 return u.remove_duplicates_on_index;
803 }
804 const auto &remove_duplicates_on_index() const {
805 assert(type == REMOVE_DUPLICATES_ON_INDEX);
806 return u.remove_duplicates_on_index;
807 }
808 auto &alternative() {
809 assert(type == ALTERNATIVE);
810 return u.alternative;
811 }
812 const auto &alternative() const {
813 assert(type == ALTERNATIVE);
814 return u.alternative;
815 }
816 auto &cache_invalidator() {
817 assert(type == CACHE_INVALIDATOR);
818 return u.cache_invalidator;
819 }
820 const auto &cache_invalidator() const {
821 assert(type == CACHE_INVALIDATOR);
822 return u.cache_invalidator;
823 }
824 auto &delete_rows() {
825 assert(type == DELETE_ROWS);
826 return u.delete_rows;
827 }
828 const auto &delete_rows() const {
829 assert(type == DELETE_ROWS);
830 return u.delete_rows;
831 }
832
833 private:
834 // We'd prefer if this could be an std::variant, but we don't have C++17 yet.
835 // It is private to force all access to be through the type-checking
836 // accessors.
837 //
838 // For information about the meaning of each value, see the corresponding
839 // row iterator constructors.
840 union {
841 struct {
842 TABLE *table;
843 } table_scan;
844 struct {
845 TABLE *table;
846 int idx;
847 bool use_order;
848 bool reverse;
849 } index_scan;
850 struct {
851 TABLE *table;
852 TABLE_REF *ref;
853 bool use_order;
854 bool reverse;
855 } ref;
856 struct {
857 TABLE *table;
858 TABLE_REF *ref;
859 bool use_order;
860 } ref_or_null;
861 struct {
862 TABLE *table;
863 TABLE_REF *ref;
864 bool use_order;
865 } eq_ref;
866 struct {
867 TABLE *table;
868 TABLE_REF *ref;
869 bool use_order;
870 bool is_unique;
871 } pushed_join_ref;
872 struct {
873 TABLE *table;
874 TABLE_REF *ref;
875 bool use_order;
876 bool use_limit;
877 Item_func_match *ft_func;
878 } full_text_search;
879 struct {
880 TABLE *table;
881 TABLE_REF *ref;
882 } const_table;
883 struct {
884 TABLE *table;
885 TABLE_REF *ref;
886 AccessPath *bka_path;
887 int mrr_flags;
888 bool keep_current_rowid;
889 } mrr;
890 struct {
891 TABLE *table;
892 } follow_tail;
893 struct {
894 // The key part(s) we are scanning on. Note that this may be an array.
895 // You can get the table we are working on by looking into
896 // used_key_parts[0].field->table (it is not stored directly, to avoid
897 // going over the AccessPath size limits).
898 KEY_PART *used_key_part;
899
900 // The actual ranges we are scanning over (originally derived from “key”).
901 // Not a Bounds_checked_array, to save 4 bytes on the length.
902 QUICK_RANGE **ranges;
903 unsigned num_ranges;
904
905 unsigned mrr_flags;
906 unsigned mrr_buf_size;
907
908 // Which index (in the TABLE) we are scanning over, and how many of its
909 // key parts we are using.
910 unsigned index;
911 unsigned num_used_key_parts;
912
913 // If true, the scan can return rows in rowid order.
914 bool can_be_used_for_ror : 1;
915
916 // If true, the scan _should_ return rows in rowid order.
917 // Should only be set if can_be_used_for_ror == true.
918 bool need_rows_in_rowid_order : 1;
919
920 // If true, this plan can be used for index merge scan.
921 bool can_be_used_for_imerge : 1;
922
923 // See row intersection for more details.
924 bool reuse_handler : 1;
925
926 // Whether we are scanning over a geometry key part.
927 bool geometry : 1;
928
929 // Whether we need a reverse scan. Only supported if geometry == false.
930 bool reverse : 1;
931
932 // For a reverse scan, if we are using extended key parts. It is needed,
933 // to set correct flags when retrieving records.
934 bool using_extended_key_parts : 1;
935 } index_range_scan;
936 struct {
937 TABLE *table;
938 bool forced_by_hint;
939 bool allow_clustered_primary_key_scan;
940 Mem_root_array<AccessPath *> *children;
941 } index_merge;
942 struct {
943 TABLE *table;
944 Mem_root_array<AccessPath *> *children;
945
946 // Clustered primary key scan, if any.
947 AccessPath *cpk_child;
948
949 bool forced_by_hint;
950 bool retrieve_full_rows;
951 bool need_rows_in_rowid_order;
952
953 // If true, the first child scan should reuse table->file instead of
954 // creating its own. This is true if the intersection is the topmost
955 // range scan, but _not_ if it's below a union. (The reasons for this
956 // are unknown.) It can also be negated by logic involving
957 // retrieve_full_rows and is_covering, again for unknown reasons.
958 //
959 // This is not only for performance; multi-table delete has a hidden
960 // dependency on this behavior when running against certain types of
961 // tables (e.g. MyISAM), as it assumes table->file is correctly positioned
962 // when deleting (and not all table types can transfer the position of one
963 // handler to another by using position()).
964 bool reuse_handler;
965
966 // true if no row retrieval phase is necessary.
967 bool is_covering;
968 } rowid_intersection;
969 struct {
970 TABLE *table;
971 Mem_root_array<AccessPath *> *children;
972 bool forced_by_hint;
973 } rowid_union;
974 struct {
975 TABLE *table;
976 unsigned index;
977 unsigned num_used_key_parts;
978 bool forced_by_hint;
979
980 // Large, and has nontrivial destructors, so split out into
981 // its own allocation.
982 IndexSkipScanParameters *param;
983 } index_skip_scan;
984 struct {
985 TABLE *table;
986 unsigned index;
987 unsigned num_used_key_parts;
988 bool forced_by_hint;
989
990 // Large, so split out into its own allocation.
991 GroupIndexSkipScanParameters *param;
992 } group_index_skip_scan;
993 struct {
994 TABLE *table;
995 QEP_TAB *qep_tab; // Used only for buffering.
996 } dynamic_index_range_scan;
997 struct {
998 TABLE *table;
999 Table_function *table_function;
1000 AccessPath *table_path;
1001 } materialized_table_function;
1002 struct {
1003 } unqualified_count;
1004
1005 struct {
1006 // No members (implicit from the JOIN).
1007 } table_value_constructor;
1008 struct {
1009 // No members.
1010 } fake_single_row;
1011 struct {
1012 // The child is optional. It is only used for keeping track of which
1013 // tables are pruned away by this path, and it is only needed when this
1014 // path is on the inner side of an outer join. See ZeroRowsIterator for
1015 // details. The child of a ZERO_ROWS access path will not be visited by
1016 // WalkAccessPaths(). It will be visited by WalkTablesUnderAccessPath()
1017 // only if called with include_pruned_tables = true. No iterator is
1018 // created for the child, and the child is not shown by EXPLAIN.
1019 AccessPath *child;
1020 // Used for EXPLAIN only.
1021 // TODO(sgunders): make an enum.
1022 const char *cause;
1023 } zero_rows;
1024 struct {
1025 // Used for EXPLAIN only.
1026 // TODO(sgunders): make an enum.
1027 const char *cause;
1028 } zero_rows_aggregated;
1029
1030 struct {
1031 AccessPath *outer, *inner;
1032 const JoinPredicate *join_predicate;
1033 bool allow_spill_to_disk;
1034 bool store_rowids; // Whether we are below a weedout or not.
1035 bool rewrite_semi_to_inner;
1036 table_map tables_to_get_rowid_for;
1037 } hash_join;
1038 struct {
1039 AccessPath *outer, *inner;
1040 JoinType join_type;
1041 unsigned mrr_length_per_rec;
1042 float rec_per_key;
1043 bool store_rowids; // Whether we are below a weedout or not.
1044 table_map tables_to_get_rowid_for;
1045 } bka_join;
1046 struct {
1047 AccessPath *outer, *inner;
1048 JoinType join_type; // Somewhat redundant wrt. join_predicate.
1049 bool pfs_batch_mode;
1050 bool already_expanded_predicates;
1051 const JoinPredicate *join_predicate;
1052
1053 // Equijoin filters to apply before the join, if any.
1054 // Indexes into join_predicate->expr->equijoin_conditions.
1055 // Non-equijoin conditions are always applied.
1056 // If already_expanded_predicates is true, do not re-expand.
1057 OverflowBitset equijoin_predicates;
1058
1059 // NOTE: Due to the nontrivial constructor on equijoin_predicates,
1060 // this struct needs an initializer, or the union would not be
1061 // default-constructible. If we need more than one union member
1062 // with such an initializer, we would probably need to change
1063 // equijoin_predicates into a uint64_t type-punned to an OverflowBitset.
1064 } nested_loop_join = {nullptr, nullptr, JoinType::INNER, false, false,
1065 nullptr, {}};
1066 struct {
1067 AccessPath *outer, *inner;
1068 const TABLE *table;
1069 KEY *key;
1070 size_t key_len;
1071 } nested_loop_semijoin_with_duplicate_removal;
1072
1073 struct {
1074 AccessPath *child;
1075 Item *condition;
1076
1077 // This parameter, unlike nearly all others, is not passed to the
1078 // actual iterator. Instead, if true, it signifies that when creating
1079 // the iterator, all materializable subqueries in “condition” should be
1080 // materialized (with any in2exists condition removed first). In the
1081 // very rare case that there are two or more such subqueries, this is
1082 // an all-or-nothing decision, for simplicity.
1083 //
1084 // See FinalizeMaterializedSubqueries().
1085 bool materialize_subqueries;
1086 } filter;
1087 struct {
1088 AccessPath *child;
1089 Filesort *filesort;
1090 table_map tables_to_get_rowid_for;
1091
1092 // If filesort is nullptr: A new filesort will be created at the
1093 // end of optimization, using this order and flags. Otherwise: Ignored.
1094 ORDER *order;
1095 bool remove_duplicates;
1096 bool unwrap_rollup;
1097 bool use_limit;
1098 ha_rows limit;
1099 } sort;
1100 struct {
1101 AccessPath *child;
1102 bool rollup;
1103 } aggregate;
1104 struct {
1105 AccessPath *subquery_path;
1106 Temp_table_param *temp_table_param;
1107 TABLE *table;
1108 AccessPath *table_path;
1109 int ref_slice;
1110 } temptable_aggregate;
1111 struct {
1112 AccessPath *child;
1113 ha_rows limit;
1114 ha_rows offset;
1115 bool count_all_rows;
1116 bool reject_multiple_rows;
1117 // Only used when the LIMIT is on a UNION with SQL_CALC_FOUND_ROWS.
1118 // See Query_expression::send_records.
1119 ha_rows *send_records_override;
1120 } limit_offset;
1121 struct {
1122 AccessPath *child;
1123 JOIN *join;
1124 Temp_table_param *temp_table_param;
1125 TABLE *table;
1126 bool provide_rowid;
1127 int ref_slice;
1128 } stream;
1129 struct {
1130 // NOTE: The only legal access paths within table_path are
1131 // TABLE_SCAN, REF, REF_OR_NULL, EQ_REF, ALTERNATIVE and
1132 // CONST_TABLE (the latter is somewhat nonsensical).
1133 AccessPath *table_path;
1134
1135 // Large, and has nontrivial destructors, so split out
1136 // into its own allocation.
1137 MaterializePathParameters *param;
1138 } materialize;
1139 struct {
1140 AccessPath *table_path;
1141 TABLE_LIST *table_list;
1142 Item *condition;
1143 } materialize_information_schema_table;
1144 struct {
1145 Mem_root_array<AppendPathParameters> *children;
1146 } append;
1147 struct {
1148 AccessPath *child;
1149 Window *window;
1150 TABLE *temp_table;
1151 Temp_table_param *temp_table_param;
1152 int ref_slice;
1153 bool needs_buffering;
1154 } window;
1155 struct {
1156 AccessPath *child;
1157 SJ_TMP_TABLE *weedout_table;
1158 table_map tables_to_get_rowid_for;
1159 } weedout;
1160 struct {
1161 AccessPath *child;
1162 Item **group_items;
1163 int group_items_size;
1164 } remove_duplicates;
1165 struct {
1166 AccessPath *child;
1167 TABLE *table;
1168 KEY *key;
1169 unsigned loosescan_key_len;
1170 } remove_duplicates_on_index;
1171 struct {
1172 AccessPath *table_scan_path;
1173
1174 // For the ref.
1175 AccessPath *child;
1176 TABLE_REF *used_ref;
1177 } alternative;
1178 struct {
1179 AccessPath *child;
1180 const char *name;
1181 } cache_invalidator;
1182 struct {
1183 AccessPath *child;
1184 table_map tables_to_delete_from;
1185 table_map immediate_tables;
1186 } delete_rows;
1187 } u;
1188};
1189static_assert(std::is_trivially_destructible<AccessPath>::value,
1190 "AccessPath must be trivially destructible, as it is allocated "
1191 "on the MEM_ROOT and not wrapped in unique_ptr_destroy_only"
1192 "(because multiple candidates during planning could point to "
1193 "the same access paths, and refcounting would be expensive)");
1194static_assert(sizeof(AccessPath) <= 136,
1195 "We are creating a lot of access paths in the join "
1196 "optimizer, so be sure not to bloat it without noticing. "
1197 "(96 bytes for the base, 40 bytes for the variant.)");
1198
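// Example (sketch; ReplaceIfCheaper is illustrative, not part of the API):
// the trivially destructible, fixed-size layout is what lets the planner
// overwrite a path with a better candidate by plain assignment, with no new
// allocation and no reference counting:
//
//   void ReplaceIfCheaper(const AccessPath &candidate, AccessPath *slot) {
//     if (candidate.cost < slot->cost) *slot = candidate;
//   }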
1199inline void CopyBasicProperties(const AccessPath &from, AccessPath *to) {
1200 to->num_output_rows = from.num_output_rows;
1201 to->cost = from.cost;
1202 to->init_cost = from.init_cost;
1203 to->init_once_cost = from.init_once_cost;
1204 to->parameter_tables = from.parameter_tables;
1205 to->safe_for_rowid = from.safe_for_rowid;
1206 to->ordering_state = from.ordering_state;
1207}
1208
1209// Trivial factory functions for all of the types of access paths above.
1210
1211inline AccessPath *NewTableScanAccessPath(THD *thd, TABLE *table,
1212 bool count_examined_rows) {
1213 AccessPath *path = new (thd->mem_root) AccessPath;
1214 path->type = AccessPath::TABLE_SCAN;
1215 path->count_examined_rows = count_examined_rows;
1216 path->table_scan().table = table;
1217 return path;
1218}
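// Example (sketch): the factory functions compose into a plan tree. For a
// hypothetical "SELECT ... FROM t1 WHERE <cond>", with thd, t1 and cond
// taken from the surrounding context:
//
//   AccessPath *scan =
//       NewTableScanAccessPath(thd, t1, /*count_examined_rows=*/true);
//   AccessPath *root = NewFilterAccessPath(thd, scan, cond);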
1219
1220inline AccessPath *NewIndexScanAccessPath(THD *thd, TABLE *table, int idx,
1221 bool use_order, bool reverse,
1222 bool count_examined_rows) {
1223 AccessPath *path = new (thd->mem_root) AccessPath;
1224 path->type = AccessPath::INDEX_SCAN;
1225 path->count_examined_rows = count_examined_rows;
1226 path->index_scan().table = table;
1227 path->index_scan().idx = idx;
1228 path->index_scan().use_order = use_order;
1229 path->index_scan().reverse = reverse;
1230 return path;
1231}
1232
1233inline AccessPath *NewRefAccessPath(THD *thd, TABLE *table, TABLE_REF *ref,
1234 bool use_order, bool reverse,
1235 bool count_examined_rows) {
1236 AccessPath *path = new (thd->mem_root) AccessPath;
1237 path->type = AccessPath::REF;
1238 path->count_examined_rows = count_examined_rows;
1239 path->ref().table = table;
1240 path->ref().ref = ref;
1241 path->ref().use_order = use_order;
1242 path->ref().reverse = reverse;
1243 return path;
1244}
1245
1246inline AccessPath *NewRefOrNullAccessPath(THD *thd, TABLE *table,
1247 TABLE_REF *ref, bool use_order,
1248 bool count_examined_rows) {
1249 AccessPath *path = new (thd->mem_root) AccessPath;
1250 path->type = AccessPath::REF_OR_NULL;
1251 path->count_examined_rows = count_examined_rows;
1252 path->ref_or_null().table = table;
1253 path->ref_or_null().ref = ref;
1254 path->ref_or_null().use_order = use_order;
1255 return path;
1256}
1257
1258inline AccessPath *NewEQRefAccessPath(THD *thd, TABLE *table, TABLE_REF *ref,
1259 bool use_order,
1260 bool count_examined_rows) {
1261 AccessPath *path = new (thd->mem_root) AccessPath;
1262 path->type = AccessPath::EQ_REF;
1263 path->count_examined_rows = count_examined_rows;
1264 path->eq_ref().table = table;
1265 path->eq_ref().ref = ref;
1266 path->eq_ref().use_order = use_order;
1267 return path;
1268}
1269
1270inline AccessPath *NewPushedJoinRefAccessPath(THD *thd, TABLE *table,
1271 TABLE_REF *ref, bool use_order,
1272 bool is_unique,
1273 bool count_examined_rows) {
1274 AccessPath *path = new (thd->mem_root) AccessPath;
1275 path->type = AccessPath::PUSHED_JOIN_REF;
1276 path->count_examined_rows = count_examined_rows;
1277 path->pushed_join_ref().table = table;
1278 path->pushed_join_ref().ref = ref;
1279 path->pushed_join_ref().use_order = use_order;
1280 path->pushed_join_ref().is_unique = is_unique;
1281 return path;
1282}
1283
1284inline AccessPath *NewFullTextSearchAccessPath(THD *thd, TABLE *table,
1285 TABLE_REF *ref,
1286 Item_func_match *ft_func,
1287 bool use_order, bool use_limit,
1288 bool count_examined_rows) {
1289 AccessPath *path = new (thd->mem_root) AccessPath;
1290 path->type = AccessPath::FULL_TEXT_SEARCH;
1291 path->count_examined_rows = count_examined_rows;
1292 path->full_text_search().table = table;
1293 path->full_text_search().ref = ref;
1294 path->full_text_search().use_order = use_order;
1295 path->full_text_search().use_limit = use_limit;
1296 path->full_text_search().ft_func = ft_func;
1297 return path;
1298}
1299
1300inline AccessPath *NewConstTableAccessPath(THD *thd, TABLE *table,
1301 TABLE_REF *ref,
1302 bool count_examined_rows) {
1303 AccessPath *path = new (thd->mem_root) AccessPath;
1304 path->type = AccessPath::CONST_TABLE;
1305 path->count_examined_rows = count_examined_rows;
1306 path->num_output_rows = 1.0;
1307 path->cost = 0.0;
1308 path->init_cost = 0.0;
1309 path->init_once_cost = 0.0;
1310 path->const_table().table = table;
1311 path->const_table().ref = ref;
1312 return path;
1313}
1314
1315inline AccessPath *NewMRRAccessPath(THD *thd, TABLE *table, TABLE_REF *ref,
1316 int mrr_flags) {
1317 AccessPath *path = new (thd->mem_root) AccessPath;
1318 path->type = AccessPath::MRR;
1319 path->mrr().table = table;
1320 path->mrr().ref = ref;
1321 path->mrr().mrr_flags = mrr_flags;
1322
1323 // This will be filled in when the BKA iterator is created.
1324 path->mrr().bka_path = nullptr;
1325
1326 return path;
1327}
1328
1329inline AccessPath *NewFollowTailAccessPath(THD *thd, TABLE *table,
1330 bool count_examined_rows) {
1331 AccessPath *path = new (thd->mem_root) AccessPath;
1332 path->type = AccessPath::FOLLOW_TAIL;
1333 path->count_examined_rows = count_examined_rows;
1334 path->follow_tail().table = table;
1335 return path;
1336}
1337
1338inline AccessPath *NewDynamicIndexRangeScanAccessPath(
1339 THD *thd, TABLE *table, QEP_TAB *qep_tab, bool count_examined_rows) {
1340 AccessPath *path = new (thd->mem_root) AccessPath;
1341 path->type = AccessPath::DYNAMIC_INDEX_RANGE_SCAN;
1342 path->count_examined_rows = count_examined_rows;
1343 path->dynamic_index_range_scan().table = table;
1344 path->dynamic_index_range_scan().qep_tab = qep_tab;
1345 return path;
1346}
1347
1348inline AccessPath *NewMaterializedTableFunctionAccessPath(
1349 THD *thd, TABLE *table, Table_function *table_function,
1350 AccessPath *table_path) {
1351 AccessPath *path = new (thd->mem_root) AccessPath;
1352 path->type = AccessPath::MATERIALIZED_TABLE_FUNCTION;
1353 path->materialized_table_function().table = table;
1354 path->materialized_table_function().table_function = table_function;
1355 path->materialized_table_function().table_path = table_path;
1356 return path;
1357}
1358
1359inline AccessPath *NewUnqualifiedCountAccessPath(THD *thd) {
1360 AccessPath *path = new (thd->mem_root) AccessPath;
1361 path->type = AccessPath::UNQUALIFIED_COUNT;
1362 return path;
1363}
1364
1365inline AccessPath *NewTableValueConstructorAccessPath(THD *thd) {
1366 AccessPath *path = new (thd->mem_root) AccessPath;
1367 path->type = AccessPath::TABLE_VALUE_CONSTRUCTOR;
1368 // The iterator keeps track of which row it is at in examined_rows,
1369 // so we always need to give it the pointer.
1370 path->count_examined_rows = true;
1371 return path;
1372}
1373
1374inline AccessPath *NewNestedLoopSemiJoinWithDuplicateRemovalAccessPath(
1375 THD *thd, AccessPath *outer, AccessPath *inner, const TABLE *table,
1376 KEY *key, size_t key_len) {
1377 AccessPath *path = new (thd->mem_root) AccessPath;
1378 path->type = AccessPath::NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL;
1379 path->nested_loop_semijoin_with_duplicate_removal().outer = outer;
1380 path->nested_loop_semijoin_with_duplicate_removal().inner = inner;
1381 path->nested_loop_semijoin_with_duplicate_removal().table = table;
1382 path->nested_loop_semijoin_with_duplicate_removal().key = key;
1383 path->nested_loop_semijoin_with_duplicate_removal().key_len = key_len;
1384 return path;
1385}
1386
1387inline AccessPath *NewFilterAccessPath(THD *thd, AccessPath *child,
1388 Item *condition) {
1389 AccessPath *path = new (thd->mem_root) AccessPath;
1390 path->type = AccessPath::FILTER;
1391 path->filter().child = child;
1392 path->filter().condition = condition;
1393 path->filter().materialize_subqueries = false;
1394 return path;
1395}
1396
1397// Not inline, because it needs access to filesort internals
1398// (which are forward-declared in this file).
1399AccessPath *NewSortAccessPath(THD *thd, AccessPath *child, Filesort *filesort,
1400 bool count_examined_rows);
1401
1402inline AccessPath *NewAggregateAccessPath(THD *thd, AccessPath *child,
1403 bool rollup) {
1404 AccessPath *path = new (thd->mem_root) AccessPath;
1405 path->type = AccessPath::AGGREGATE;
1406 path->aggregate().child = child;
1407 path->aggregate().rollup = rollup;
1408 return path;
1409}
1410
1411inline AccessPath *NewTemptableAggregateAccessPath(
1412 THD *thd, AccessPath *subquery_path, Temp_table_param *temp_table_param,
1413 TABLE *table, AccessPath *table_path, int ref_slice) {
1414 AccessPath *path = new (thd->mem_root) AccessPath;
1415 path->type = AccessPath::TEMPTABLE_AGGREGATE;
1416 path->temptable_aggregate().subquery_path = subquery_path;
1417 path->temptable_aggregate().temp_table_param = temp_table_param;
1418 path->temptable_aggregate().table = table;
1419 path->temptable_aggregate().table_path = table_path;
1420 path->temptable_aggregate().ref_slice = ref_slice;
1421 return path;
1422}
1423
1424inline AccessPath *NewLimitOffsetAccessPath(THD *thd, AccessPath *child,
1425 ha_rows limit, ha_rows offset,
1426 bool count_all_rows,
1427 bool reject_multiple_rows,
1428 ha_rows *send_records_override) {
1429 AccessPath *path = new (thd->mem_root) AccessPath;
1430 path->type = AccessPath::LIMIT_OFFSET;
1431 path->immediate_update_delete_table = child->immediate_update_delete_table;
1432 path->limit_offset().child = child;
1433 path->limit_offset().limit = limit;
1434 path->limit_offset().offset = offset;
1435 path->limit_offset().count_all_rows = count_all_rows;
1436 path->limit_offset().reject_multiple_rows = reject_multiple_rows;
1437 path->limit_offset().send_records_override = send_records_override;
1438
1439 if (child->num_output_rows >= 0.0) {
1440 path->num_output_rows =
1441 offset >= child->num_output_rows
1442 ? 0.0
1443 : (std::min<double>(child->num_output_rows, limit) - offset);
1444 }
1445
1446 if (child->init_cost < 0.0) {
1447 // We have nothing better, since we don't know how much of it is startup cost.
1448 path->cost = child->cost;
1449 } else if (child->num_output_rows < 1e-6) {
1450 path->cost = path->init_cost = child->init_cost;
1451 } else {
1452 const double fraction_start_read =
1453 std::min(1.0, double(offset) / child->num_output_rows);
1454 const double fraction_full_read =
1455 std::min(1.0, double(limit) / child->num_output_rows);
1456 path->cost = child->init_cost +
1457 fraction_full_read * (child->cost - child->init_cost);
1458 path->init_cost = child->init_cost +
1459 fraction_start_read * (child->cost - child->init_cost);
1460 }
1461 path->init_once_cost = child->init_once_cost;
1462 path->ordering_state = child->ordering_state;
1463
1464 return path;
1465}
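// Worked example (illustrative numbers only): for a child with
// num_output_rows = 100.0, init_cost = 10.0 and cost = 110.0, a LIMIT 10
// (offset 0) path gets num_output_rows = 10.0,
// fraction_full_read = 10 / 100 = 0.1 and
// cost = 10.0 + 0.1 * (110.0 - 10.0) = 20.0; i.e., we only expect to pay for
// the fraction of the child's scan needed to produce the first ten rows.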
1466
1467inline AccessPath *NewFakeSingleRowAccessPath(THD *thd,
1468 bool count_examined_rows) {
1469 AccessPath *path = new (thd->mem_root) AccessPath;
1470 path->type = AccessPath::FAKE_SINGLE_ROW;
1471 path->count_examined_rows = count_examined_rows;
1472 path->num_output_rows = 1.0;
1473 path->cost = 0.0;
1474 path->init_cost = 0.0;
1475 path->init_once_cost = 0.0;
1476 return path;
1477}
1478
1479inline AccessPath *NewZeroRowsAccessPath(THD *thd, AccessPath *child,
1480 const char *cause) {
1481 AccessPath *path = new (thd->mem_root) AccessPath;
1482 path->type = AccessPath::ZERO_ROWS;
1483 path->zero_rows().child = child;
1484 path->zero_rows().cause = cause;
1485 path->num_output_rows = 0.0;
1486 path->cost = 0.0;
1487 path->init_cost = 0.0;
1488 path->init_once_cost = 0.0;
1489 return path;
1490}
1491
1492inline AccessPath *NewZeroRowsAccessPath(THD *thd, const char *cause) {
1493 return NewZeroRowsAccessPath(thd, /*child=*/nullptr, cause);
1494}
1495
1496inline AccessPath *NewZeroRowsAggregatedAccessPath(THD *thd,
1497 const char *cause) {
1498 AccessPath *path = new (thd->mem_root) AccessPath;
1499 path->type = AccessPath::ZERO_ROWS_AGGREGATED;
1500 path->zero_rows_aggregated().cause = cause;
1501 path->num_output_rows = 1.0;
1502 path->cost = 0.0;
1503 path->init_cost = 0.0;
1504 return path;
1505}
1506
1507inline AccessPath *NewStreamingAccessPath(THD *thd, AccessPath *child,
1508 JOIN *join,
1509 Temp_table_param *temp_table_param,
1510 TABLE *table, int ref_slice) {
1511 AccessPath *path = new (thd->mem_root) AccessPath;
1512 path->type = AccessPath::STREAM;
1513 path->stream().child = child;
1514 path->stream().join = join;
1515 path->stream().temp_table_param = temp_table_param;
1516 path->stream().table = table;
1517 path->stream().ref_slice = ref_slice;
1518 // Will be set later if we get a weedout access path as parent.
1519 path->stream().provide_rowid = false;
1520 return path;
1521}
1522
1523inline Mem_root_array<MaterializePathParameters::QueryBlock>
1524SingleMaterializeQueryBlock(THD *thd, AccessPath *path, int select_number,
1525 JOIN *join, bool copy_items,
1526 Temp_table_param *temp_table_param) {
1527 assert(path != nullptr);
1528 Mem_root_array<MaterializePathParameters::QueryBlock> array(thd->mem_root, 1);
1529 MaterializePathParameters::QueryBlock &query_block = array[0];
1530 query_block.subquery_path = path;
1531 query_block.select_number = select_number;
1532 query_block.join = join;
1533 query_block.disable_deduplication_by_hash_field = false;
1534 query_block.copy_items = copy_items;
1535 query_block.temp_table_param = temp_table_param;
1536 return array;
1537}
1538
1539inline AccessPath *NewMaterializeAccessPath(
1540 THD *thd,
1541 Mem_root_array<MaterializePathParameters::QueryBlock> query_blocks,
1542 Mem_root_array<const AccessPath *> *invalidators, TABLE *table,
1543 AccessPath *table_path, Common_table_expr *cte, Query_expression *unit,
1544 int ref_slice, bool rematerialize, ha_rows limit_rows,
1545 bool reject_multiple_rows) {
1546 MaterializePathParameters *param =
1547 new (thd->mem_root) MaterializePathParameters;
1548 param->query_blocks = std::move(query_blocks);
1549 if (rematerialize) {
1550 // There's no point in adding invalidators if we're rematerializing
1551 // every time anyway.
1552 param->invalidators = nullptr;
1553 } else {
1554 param->invalidators = invalidators;
1555 }
1556 param->table = table;
1557 param->cte = cte;
1558 param->unit = unit;
1559 param->ref_slice = ref_slice;
1560 param->rematerialize = rematerialize;
1561 param->limit_rows = limit_rows;
1562 param->reject_multiple_rows = reject_multiple_rows;
1563
1564#ifndef NDEBUG
1565 for (MaterializePathParameters::QueryBlock &query_block :
1566 param->query_blocks) {
1567 assert(query_block.subquery_path != nullptr);
1568 }
1569#endif
1570
1571 AccessPath *path = new (thd->mem_root) AccessPath;
1572 path->type = AccessPath::MATERIALIZE;
1573 path->materialize().table_path = table_path;
1574 path->materialize().param = param;
1575 if (rematerialize) {
1576 path->safe_for_rowid = AccessPath::SAFE_IF_SCANNED_ONCE;
1577 } else {
1578 // The default; this is just to be explicit in the code.
1579 path->safe_for_rowid = AccessPath::SAFE;
1580 }
1581 return path;
1582}
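// Example (sketch): materializing a single query block; subquery_root,
// select_number, join, tmp_param, table, table_path and unit are assumed to
// exist in the surrounding context:
//
//   AccessPath *mat = NewMaterializeAccessPath(
//       thd,
//       SingleMaterializeQueryBlock(thd, subquery_root, select_number, join,
//                                   /*copy_items=*/true, tmp_param),
//       /*invalidators=*/nullptr, table, table_path, /*cte=*/nullptr, unit,
//       /*ref_slice=*/0, /*rematerialize=*/true, /*limit_rows=*/HA_POS_ERROR,
//       /*reject_multiple_rows=*/false);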
1583
1584inline AccessPath *NewMaterializeInformationSchemaTableAccessPath(
1585 THD *thd, AccessPath *table_path, TABLE_LIST *table_list, Item *condition) {
1586 AccessPath *path = new (thd->mem_root) AccessPath;
1587 path->type = AccessPath::MATERIALIZE_INFORMATION_SCHEMA_TABLE;
1588 path->materialize_information_schema_table().table_path = table_path;
1589 path->materialize_information_schema_table().table_list = table_list;
1590 path->materialize_information_schema_table().condition = condition;
1591 return path;
1592}
1593
1594// The Mem_root_array must be allocated on a MEM_ROOT that lives at least for as
1595// long as the access path.
1596inline AccessPath *NewAppendAccessPath(
1597 THD *thd, Mem_root_array<AppendPathParameters> *children) {
1598 AccessPath *path = new (thd->mem_root) AccessPath;
1599 path->type = AccessPath::APPEND;
1600 path->append().children = children;
1601 path->cost = 0.0;
1602 path->init_cost = 0.0;
1603 path->init_once_cost = 0.0;
1604 for (const AppendPathParameters &child : *children) {
1605 path->cost += child.path->cost;
1606 path->init_cost += child.path->init_cost;
1607 path->init_once_cost += child.path->init_once_cost;
1608 }
1609 return path;
1610}
1611
1612inline AccessPath *NewWindowAccessPath(THD *thd, AccessPath *child,
1613 Temp_table_param *temp_table_param,
1614 int ref_slice, bool needs_buffering) {
1615 AccessPath *path = new (thd->mem_root) AccessPath;
1616 path->type = AccessPath::WINDOW;
1617 path->window().child = child;
1618 path->window().temp_table_param = temp_table_param;
1619 path->window().ref_slice = ref_slice;
1620 path->window().needs_buffering = needs_buffering;
1621 return path;
1622}
1623
1624inline AccessPath *NewWeedoutAccessPath(THD *thd, AccessPath *child,
1625 SJ_TMP_TABLE *weedout_table) {
1626 AccessPath *path = new (thd->mem_root) AccessPath;
1627 path->type = AccessPath::WEEDOUT;
1628 path->weedout().child = child;
1629 path->weedout().weedout_table = weedout_table;
1630 path->weedout().tables_to_get_rowid_for =
1631 0; // Must be handled by the caller.
1632 return path;
1633}
1634
1635inline AccessPath *NewRemoveDuplicatesAccessPath(THD *thd, AccessPath *child,
1636 Item **group_items,
1637 int group_items_size) {
1638 AccessPath *path = new (thd->mem_root) AccessPath;
1639 path->type = AccessPath::REMOVE_DUPLICATES;
1640 path->remove_duplicates().child = child;
1641 path->remove_duplicates().group_items = group_items;
1642 path->remove_duplicates().group_items_size = group_items_size;
1643 return path;
1644}
1645
1646inline AccessPath *NewRemoveDuplicatesOnIndexAccessPath(
1647 THD *thd, AccessPath *child, TABLE *table, KEY *key,
1648 unsigned loosescan_key_len) {
1649 AccessPath *path = new (thd->mem_root) AccessPath;
1650 path->type = AccessPath::REMOVE_DUPLICATES_ON_INDEX;
1651 path->remove_duplicates_on_index().child = child;
1652 path->remove_duplicates_on_index().table = table;
1653 path->remove_duplicates_on_index().key = key;
1654 path->remove_duplicates_on_index().loosescan_key_len = loosescan_key_len;
1655 return path;
1656}
1657
1658inline AccessPath *NewAlternativeAccessPath(THD *thd, AccessPath *child,
1659 AccessPath *table_scan_path,
1660 TABLE_REF *used_ref) {
1661 AccessPath *path = new (thd->mem_root) AccessPath;
1662 path->type = AccessPath::ALTERNATIVE;
1663 path->alternative().table_scan_path = table_scan_path;
1664 path->alternative().child = child;
1665 path->alternative().used_ref = used_ref;
1666 return path;
1667}
1668
1669inline AccessPath *NewInvalidatorAccessPath(THD *thd, AccessPath *child,
1670 const char *name) {
1671 AccessPath *path = new (thd->mem_root) AccessPath;
1672 path->type = AccessPath::CACHE_INVALIDATOR;
1673 path->cache_invalidator().child = child;
1674 path->cache_invalidator().name = name;
1675 return path;
1676}
1677
1678AccessPath *NewDeleteRowsAccessPath(THD *thd, AccessPath *child,
1679 table_map delete_tables,
1680 table_map immediate_tables);
1681
1682/**
1683 Modifies "path" and the paths below it so that they provide row IDs for
1684 all tables.
1685 */
1686void FindTablesToGetRowidFor(AccessPath *path);
1687
1688/**
1689 If the path is a FILTER path marked that subqueries are to be materialized,
1690 do so. If not, do nothing.
1691
1692 It is important that this is not called until the entire plan is ready;
1693 not just when planning a single query block. The reason is that a query
1694 block A with materializable subqueries may itself be part of a materializable
1695 subquery B, so if one calls this when planning A, the subqueries in A will
1696 irrevocably be materialized, even if that is not the optimal plan given B.
1697 Thus, this is done when creating iterators.
1698 */
1699bool FinalizeMaterializedSubqueries(THD *thd, JOIN *join, AccessPath *path);
1700
1701unique_ptr_destroy_only<RowIterator> CreateIteratorFromAccessPath(
1702 THD *thd, MEM_ROOT *mem_root, AccessPath *path, JOIN *join,
1703 bool eligible_for_batch_mode);
1704
1705// A short form of CreateIteratorFromAccessPath() that implicitly uses the THD's
1706// MEM_ROOT for storage, which is nearly always what you want. (The only caller
1707// that does anything else is DynamicRangeIterator.)
1708inline unique_ptr_destroy_only<RowIterator> CreateIteratorFromAccessPath(
1709 THD *thd, AccessPath *path, JOIN *join, bool eligible_for_batch_mode) {
1710 return CreateIteratorFromAccessPath(thd, thd->mem_root, path, join,
1711 eligible_for_batch_mode);
1712}
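// Example (sketch): driving a finished plan; error handling is elided, and
// thd, path and join are assumed from context. RowIterator::Init() returns
// true on error; Read() returns 0 for each row and -1 at end of data:
//
//   unique_ptr_destroy_only<RowIterator> iterator =
//       CreateIteratorFromAccessPath(thd, path, join,
//                                    /*eligible_for_batch_mode=*/true);
//   if (iterator == nullptr || iterator->Init()) return true;  // error
//   while (iterator->Read() == 0) {
//     // The tables' record buffers now hold the current row.
//   }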
1713
1714void SetCostOnTableAccessPath(const Cost_model_server &cost_model,
1715 const POSITION *pos, bool is_after_filter,
1716 AccessPath *path);
1717
1718/**
1719 Return the TABLE* referred from 'path' if it is a basic access path,
1720 else a nullptr is returned. Temporary tables, such as those used by
1721 sorting, aggregate and subquery materialization are not returned.
1722*/
1723TABLE *GetBasicTable(const AccessPath *path);
1724
1725/**
1726 Returns a map of all tables read when `path` or any of its children are
1727 executed. Only iterators that are part of the same query block as `path`
1728 are considered.
1729
1730 If a table is read that doesn't have a map, specifically the temporary
1731 tables made as part of materialization within the same query block,
1732 RAND_TABLE_BIT will be set as a convention and none of that access path's
1733 children will be included in the map. In this case, the caller will need to
1734 manually go in and find said access path, to ask it for its TABLE object.
1735
1736 If include_pruned_tables = true, tables that are hidden under a ZERO_ROWS
1737 access path (ie., pruned away due to impossible join conditions) will be
1738 included in the map. This is normally what you want, as those tables need to
1739 be included whenever you store NULL flags and the likes, but if you don't
1740 want them (perhaps to specifically check for conditions referring to pruned
1741 tables), you can set it to false.
1742 */
1743table_map GetUsedTableMap(const AccessPath *path, bool include_pruned_tables);
1744
1745/**
1746 Find the list of all tables used by this root, stopping at materializations.
1747 Used for knowing which tables to sort.
1748 */
1749Mem_root_array<TABLE *> CollectTables(THD *thd, AccessPath *root_path);
1750
1751/**
1752 For each access path in the (sub)tree rooted at “path”, expand any use of
1753 “filter_predicates” into newly-inserted FILTER access paths, using the given
1754 predicate list. This is used after finding an optimal set of access paths,
1755 to normalize the tree so that the remaining consumers do not need to worry
1756 about filter_predicates and cost_before_filter.
1757
1758 “join” is the join that “path” is part of.
1759 */
1760void ExpandFilterAccessPaths(THD *thd, AccessPath *path, const JOIN *join,
1761 const Mem_root_array<Predicate> &predicates,
1762 unsigned num_where_predicates);
1763
1764/// Like ExpandFilterAccessPaths(), but expands only the single access path
1765/// at “path”.
1766void ExpandSingleFilterAccessPath(THD *thd, AccessPath *path, const JOIN *join,
1767 const Mem_root_array<Predicate> &predicates,
1768 unsigned num_where_predicates);
1769
1770/// Returns the tables that have stored row IDs in the hash join result.
1771table_map GetTablesWithRowIDsInHashJoin(AccessPath *path);
1772
1773#endif // SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
AccessPath * NewRefAccessPath(THD *thd, TABLE *table, TABLE_REF *ref, bool use_order, bool reverse, bool count_examined_rows)
Definition: access_path.h:1233
AccessPath * NewStreamingAccessPath(THD *thd, AccessPath *child, JOIN *join, Temp_table_param *temp_table_param, TABLE *table, int ref_slice)
Definition: access_path.h:1507
AccessPath * NewRemoveDuplicatesAccessPath(THD *thd, AccessPath *child, Item **group_items, int group_items_size)
Definition: access_path.h:1635
AccessPath * NewInvalidatorAccessPath(THD *thd, AccessPath *child, const char *name)
Definition: access_path.h:1669
AccessPath * NewDeleteRowsAccessPath(THD *thd, AccessPath *child, table_map delete_tables, table_map immediate_tables)
Definition: access_path.cc:85
bool FinalizeMaterializedSubqueries(THD *thd, JOIN *join, AccessPath *path)
If the path is a FILTER path marked that subqueries are to be materialized, do so.
Definition: access_path.cc:250
void CopyBasicProperties(const AccessPath &from, AccessPath *to)
Definition: access_path.h:1199
void ExpandSingleFilterAccessPath(THD *thd, AccessPath *path, const JOIN *join, const Mem_root_array< Predicate > &predicates, unsigned num_where_predicates)
Like ExpandFilterAccessPaths(), but expands only the single access path at “path”.
Definition: access_path.cc:1209
AccessPath * NewTemptableAggregateAccessPath(THD *thd, AccessPath *subquery_path, Temp_table_param *temp_table_param, TABLE *table, AccessPath *table_path, int ref_slice)
Definition: access_path.h:1411
AccessPath * NewDynamicIndexRangeScanAccessPath(THD *thd, TABLE *table, QEP_TAB *qep_tab, bool count_examined_rows)
Definition: access_path.h:1338
AccessPath * NewPushedJoinRefAccessPath(THD *thd, TABLE *table, TABLE_REF *ref, bool use_order, bool is_unique, bool count_examined_rows)
Definition: access_path.h:1270
AccessPath * NewZeroRowsAggregatedAccessPath(THD *thd, const char *cause)
Definition: access_path.h:1496
AccessPath * NewAlternativeAccessPath(THD *thd, AccessPath *child, AccessPath *table_scan_path, TABLE_REF *used_ref)
Definition: access_path.h:1658
AccessPath * NewWindowAccessPath(THD *thd, AccessPath *child, Temp_table_param *temp_table_param, int ref_slice, bool needs_buffering)
Definition: access_path.h:1612
table_map GetUsedTableMap(const AccessPath *path, bool include_pruned_tables)
Returns a map of all tables read when path or any of its children are executed.
Definition: access_path.cc:180
AccessPath * NewAppendAccessPath(THD *thd, Mem_root_array< AppendPathParameters > *children)
Definition: access_path.h:1596
AccessPath * NewNestedLoopSemiJoinWithDuplicateRemovalAccessPath(THD *thd, AccessPath *outer, AccessPath *inner, const TABLE *table, KEY *key, size_t key_len)
Definition: access_path.h:1374
AccessPath * NewSortAccessPath(THD *thd, AccessPath *child, Filesort *filesort, bool count_examined_rows)
Definition: access_path.cc:59
AccessPath * NewEQRefAccessPath(THD *thd, TABLE *table, TABLE_REF *ref, bool use_order, bool count_examined_rows)
Definition: access_path.h:1258
AccessPath * NewRemoveDuplicatesOnIndexAccessPath(THD *thd, AccessPath *child, TABLE *table, KEY *key, unsigned loosescan_key_len)
Definition: access_path.h:1646
AccessPath * NewFakeSingleRowAccessPath(THD *thd, bool count_examined_rows)
Definition: access_path.h:1467
AccessPath * NewFilterAccessPath(THD *thd, AccessPath *child, Item *condition)
Definition: access_path.h:1387
AccessPath * NewConstTableAccessPath(THD *thd, TABLE *table, TABLE_REF *ref, bool count_examined_rows)
Definition: access_path.h:1300
AccessPath * NewFullTextSearchAccessPath(THD *thd, TABLE *table, TABLE_REF *ref, Item_func_match *ft_func, bool use_order, bool use_limit, bool count_examined_rows)
Definition: access_path.h:1284
AccessPath * NewMaterializedTableFunctionAccessPath(THD *thd, TABLE *table, Table_function *table_function, AccessPath *table_path)
Definition: access_path.h:1348
AccessPath * NewMaterializeAccessPath(THD *thd, Mem_root_array< MaterializePathParameters::QueryBlock > query_blocks, Mem_root_array< const AccessPath * > *invalidators, TABLE *table, AccessPath *table_path, Common_table_expr *cte, Query_expression *unit, int ref_slice, bool rematerialize, ha_rows limit_rows, bool reject_multiple_rows)
Definition: access_path.h:1539
Mem_root_array< TABLE * > CollectTables(THD *thd, AccessPath *root_path)
Find the list of all tables used by this root, stopping at materializations.
Definition: access_path.cc:211
AccessPath * NewTableScanAccessPath(THD *thd, TABLE *table, bool count_examined_rows)
Definition: access_path.h:1211
void ExpandFilterAccessPaths(THD *thd, AccessPath *path, const JOIN *join, const Mem_root_array< Predicate > &predicates, unsigned num_where_predicates)
For each access path in the (sub)tree rooted at “path”, expand any use of “filter_predicates” into ne...
Definition: access_path.cc:1306
table_map GetTablesWithRowIDsInHashJoin(AccessPath *path)
Returns the tables that have stored row IDs in the hash join result.
Definition: access_path.cc:1318
AccessPath * NewMRRAccessPath(THD *thd, TABLE *table, TABLE_REF *ref, int mrr_flags)
Definition: access_path.h:1315
void FindTablesToGetRowidFor(AccessPath *path)
Modifies "path" and the paths below it so that they provide row IDs for all tables.
Definition: access_path.cc:1121
Mem_root_array< MaterializePathParameters::QueryBlock > SingleMaterializeQueryBlock(THD *thd, AccessPath *path, int select_number, JOIN *join, bool copy_items, Temp_table_param *temp_table_param)
Definition: access_path.h:1524
AccessPath * NewAggregateAccessPath(THD *thd, AccessPath *child, bool rollup)
Definition: access_path.h:1402
TABLE * GetBasicTable(const AccessPath *path)
Return the TABLE* referred to by 'path' if it is a basic access path; otherwise, return nullptr.
Definition: access_path.cc:134
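Sketch: since GetBasicTable() returns nullptr for composite paths, it doubles as a test for plain single-table access (handle_single_table is a hypothetical caller-side function):

  if (TABLE *t = GetBasicTable(path)) {
    handle_single_table(t);  // hypothetical: path reads exactly this table
  }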
AccessPath * NewLimitOffsetAccessPath(THD *thd, AccessPath *child, ha_rows limit, ha_rows offset, bool count_all_rows, bool reject_multiple_rows, ha_rows *send_records_override)
Definition: access_path.h:1424
AccessPath * NewFollowTailAccessPath(THD *thd, TABLE *table, bool count_examined_rows)
Definition: access_path.h:1329
AccessPath * NewTableValueConstructorAccessPath(THD *thd)
Definition: access_path.h:1365
unique_ptr_destroy_only< RowIterator > CreateIteratorFromAccessPath(THD *thd, MEM_ROOT *mem_root, AccessPath *path, JOIN *join, bool eligible_for_batch_mode)
Definition: access_path.cc:320
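An execution sketch under the usual RowIterator contract (Init() returns true on error; Read() returns 0 for a row, -1 for end of stream, 1 for error). Here thd, path and join are assumed to come from a finalized plan, and error handling is condensed to early returns:

  unique_ptr_destroy_only<RowIterator> iterator = CreateIteratorFromAccessPath(
      thd, thd->mem_root, path, join, /*eligible_for_batch_mode=*/true);
  if (iterator == nullptr || iterator->Init()) return true;  // error
  for (;;) {
    const int err = iterator->Read();
    if (err == 1) return true;  // error
    if (err == -1) break;       // end of stream
    // A row is now staged in the tables' record buffers; process it here.
  }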
AccessPath * NewWeedoutAccessPath(THD *thd, AccessPath *child, SJ_TMP_TABLE *weedout_table)
Definition: access_path.h:1624
AccessPath * NewZeroRowsAccessPath(THD *thd, AccessPath *child, const char *cause)
Definition: access_path.h:1479
AccessPath * NewMaterializeInformationSchemaTableAccessPath(THD *thd, AccessPath *table_path, TABLE_LIST *table_list, Item *condition)
Definition: access_path.h:1584
AccessPath * NewUnqualifiedCountAccessPath(THD *thd)
Definition: access_path.h:1359
AccessPath * NewIndexScanAccessPath(THD *thd, TABLE *table, int idx, bool use_order, bool reverse, bool count_examined_rows)
Definition: access_path.h:1220
AccessPath * NewRefOrNullAccessPath(THD *thd, TABLE *table, TABLE_REF *ref, bool use_order, bool count_examined_rows)
Definition: access_path.h:1246
A wrapper class which provides array bounds checking.
Definition: sql_array.h:46
After parsing, a Common Table Expression is accessed through a TABLE_LIST.
Definition: table.h:4225
API for getting cost estimates for server operations that are not directly related to a table object.
Definition: opt_costmodel.h:51
Sorting related info.
Definition: filesort.h:51
Definition: item_func.h:3447
Base class that is used to represent any kind of expression in a relational query.
Definition: item.h:802
Definition: sql_optimizer.h:125
Definition: key.h:112
A typesafe replacement for DYNAMIC_ARRAY.
Definition: mem_root_array.h:421
Definition: overflow_bitset.h:76
Definition: sql_executor.h:259
Definition: range_optimizer.h:68
MEM_ROOT * mem_root
Definition: sql_class.h:367
This class represents a query expression (one query block or several query blocks combined with UNION...
Definition: sql_lex.h:629
A context for reading through a single table using a chosen access method: index read,...
Definition: row_iterator.h:60
Definition: sql_executor.h:103
For each client connection we create a separate thread with THD serving as a thread/connection descri...
Definition: sql_class.h:945
Class representing a table function.
Definition: table_function.h:52
Object containing parameters used when creating and using temporary tables.
Definition: temp_table_param.h:98
Represents the (explicit) window of a SQL 2003 section 7.11 <window clause>, or the implicit (inlined...
Definition: window.h:104
static MEM_ROOT mem_root
Definition: client_plugin.cc:109
char * pos
Definition: do_ctype.cc:76
bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator, table_map tables_to_get_rowid_for, ha_rows num_rows_estimate, Filesort_info *fs_info, Sort_result *sort_result, ha_rows *found_rows)
Sort a table.
Definition: filesort.cc:366
void SetCostOnTableAccessPath(const Cost_model_server &cost_model, const POSITION *pos, bool is_after_filter, AccessPath *path)
Definition: sql_executor.cc:1701
std::bitset< kMaxSupportedFDs > FunctionalDependencySet
Definition: interesting_orders_defs.h:62
JoinType
Definition: join_type.h:27
std::unique_ptr< T, Destroy_only< T > > unique_ptr_destroy_only
std::unique_ptr, but one that only destroys the object, without deallocating its memory.
Definition: my_alloc.h:488
my_off_t ha_rows
Definition: my_base.h:1139
uint64_t table_map
Definition: my_table_map.h:29
static char * path
Definition: mysqldump.cc:130
PT & ref(PT *tp)
Definition: tablespace_impl.cc:358
uint64_t NodeMap
Since our graphs can never have more than 61 tables, node sets and edge lists are implemented using 6...
Definition: node_map.h:39
RangeReverse< Range > reverse(Range &x)
Iterate over a range in reverse.
Definition: utilities.h:131
std::string join(Container cont, const std::string &delim)
Join the elements of a container into a string separated by a delimiter.
Definition: string.h:150
OverflowBitset is a fixed-size (once allocated) bitmap that is optimized for the common case of few e...
required string key
Definition: replication_asynchronous_connection_failover.proto:59
case opt name
Definition: sslopt-case.h:32
Access paths are a query planning structure that correspond 1:1 to iterators, in that an access path ...
Definition: access_path.h:188
auto & weedout()
Definition: access_path.h:784
auto & filter()
Definition: access_path.h:704
bool count_all_rows
Definition: access_path.h:1115
AccessPath * bka_path
Definition: access_path.h:886
auto & ref_or_null()
Definition: access_path.h:512
auto & materialized_table_function()
Definition: access_path.h:624
AccessPath * cpk_child
Definition: access_path.h:947
auto & rowid_union()
Definition: access_path.h:592
double cost_before_filter
Definition: access_path.h:390
hypergraph::NodeMap parameter_tables
If nonzero, a bitmap of other tables whose joined-in rows must already be loaded when rows from this ...
Definition: access_path.h:476
const auto & bka_join() const
Definition: access_path.h:684
struct AccessPath::@49::@63 rowid_union
TABLE * temp_table
Definition: access_path.h:1150
OverflowBitset equijoin_predicates
Definition: access_path.h:1057
Item ** group_items
Definition: access_path.h:1162
struct AccessPath::@49::@77 filter
double rescan_cost() const
Return the cost of scanning the given path for the second time (or later) in the given query block.
Definition: access_path.h:386
bool rollup
Definition: access_path.h:1102
auto & materialize()
Definition: access_path.h:752
const auto & temptable_aggregate() const
Definition: access_path.h:732
double num_output_rows
Expected number of output rows, -1.0 for unknown.
Definition: access_path.h:354
OverflowBitset & subsumed_sargable_join_predicates()
Similar to applied_sargable_join_predicates, bitmap of sargable join predicates that have been applie...
Definition: access_path.h:448
auto & index_scan()
Definition: access_path.h:496
struct AccessPath::@49::@79 aggregate
const auto & delete_rows() const
Definition: access_path.h:828
struct AccessPath::@49::@85 append
const auto & filter() const
Definition: access_path.h:708
auto & delete_rows()
Definition: access_path.h:824
bool reuse_handler
Definition: access_path.h:924
KEY_PART * used_key_part
Definition: access_path.h:898
OverflowBitset & applied_sargable_join_predicates()
Bitmap of sargable join predicates that have already been applied in this access path by means of an ...
Definition: access_path.h:428
Item * condition
Definition: access_path.h:1075
const auto & index_merge() const
Definition: access_path.h:580
struct AccessPath::@49::@73 hash_join
enum AccessPath::Type type
auto & alternative()
Definition: access_path.h:808
auto & pushed_join_ref()
Definition: access_path.h:528
struct AccessPath::@49::@81 limit_offset
AccessPath * outer
Definition: access_path.h:1031
struct AccessPath::@49::@80 temptable_aggregate
struct AccessPath::@49::@69 table_value_constructor
struct AccessPath::@49::@58 mrr
struct AccessPath::@49::@87 weedout
auto & index_skip_scan()
Definition: access_path.h:600
struct AccessPath::@49::@82 stream
bool rewrite_semi_to_inner
Definition: access_path.h:1035
const auto & zero_rows_aggregated() const
Definition: access_path.h:668
auto & group_index_skip_scan()
Definition: access_path.h:608
struct AccessPath::@49::@64 index_skip_scan
struct AccessPath::@49::@75 nested_loop_join
const auto & eq_ref() const
Definition: access_path.h:524
int group_items_size
Definition: access_path.h:1163
bool use_order
Definition: access_path.h:847
bool pfs_batch_mode
Definition: access_path.h:1049
auto & temptable_aggregate()
Definition: access_path.h:728
AccessPath()
Definition: access_path.h:194
GroupIndexSkipScanParameters * param
Definition: access_path.h:991
auto & rowid_intersection()
Definition: access_path.h:584
struct AccessPath::@49::@89 remove_duplicates_on_index
bool materialize_subqueries
Definition: access_path.h:1085
AccessPath * child
Definition: access_path.h:1019
bool allow_spill_to_disk
Definition: access_path.h:1033
auto & mrr()
Definition: access_path.h:552
const auto & index_scan() const
Definition: access_path.h:500
const auto & fake_single_row() const
Definition: access_path.h:652
bool reject_multiple_rows
Definition: access_path.h:1116
bool allow_clustered_primary_key_scan
Definition: access_path.h:939
const char * name
Definition: access_path.h:1180
bool use_limit
Definition: access_path.h:876
Window * window
Definition: access_path.h:1149
table_map tables_to_get_rowid_for
Definition: access_path.h:1036
struct AccessPath::@49::@66 dynamic_index_range_scan
auto & materialize_information_schema_table()
Definition: access_path.h:760
auto & bka_join()
Definition: access_path.h:680
const auto & follow_tail() const
Definition: access_path.h:564
SJ_TMP_TABLE * weedout_table
Definition: access_path.h:1157
const auto & window() const
Definition: access_path.h:780
const auto & materialized_table_function() const
Definition: access_path.h:628
struct AccessPath::@49::@55 pushed_join_ref
auto & unqualified_count()
Definition: access_path.h:632
bool keep_current_rowid
Definition: access_path.h:888
bool using_extended_key_parts
Definition: access_path.h:934
auto & nested_loop_join()
Definition: access_path.h:688
const auto & full_text_search() const
Definition: access_path.h:540
bool can_be_used_for_ror
Definition: access_path.h:914
float rec_per_key
Definition: access_path.h:1042
Safety safe_for_rowid
Whether it is safe to get row IDs (for sorting) from this access path.
Definition: access_path.h:284
JOIN * join
Definition: access_path.h:1123
unsigned index
Definition: access_path.h:910
struct AccessPath::@49::@76 nested_loop_semijoin_with_duplicate_removal
unsigned mrr_buf_size
Definition: access_path.h:906
auto & remove_duplicates()
Definition: access_path.h:792
struct AccessPath::@49::@65 group_index_skip_scan
RowIterator * iterator
If an iterator has been instantiated for this access path, points to the iterator.
Definition: access_path.h:351
struct AccessPath::@49::@74 bka_join
const auto & stream() const
Definition: access_path.h:748
double init_cost
Expected cost to initialize this access path; i.e., cost to read k out of N rows would be init_cost + ...
Definition: access_path.h:366
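The (truncated) formula in the comment can be read as a linear interpolation between initialization and a full scan. This helper is purely illustrative, not part of the header:

  // Illustrative only: expected cost of reading k out of N rows,
  // per the init_cost comment: init_cost + (cost - init_cost) * k/N.
  double PartialScanCost(const AccessPath &p, double k, double N) {
    return p.init_cost + (p.cost - p.init_cost) * (k / N);
  }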
const auto & remove_duplicates() const
Definition: access_path.h:796
auto & window()
Definition: access_path.h:776
bool need_rows_in_rowid_order
Definition: access_path.h:918
const auto & unqualified_count() const
Definition: access_path.h:636
void * secondary_engine_data
Auxiliary data used by a secondary storage engine while processing the access path during optimizatio...
Definition: access_path.h:485
bool is_covering
Definition: access_path.h:967
const auto & mrr() const
Definition: access_path.h:556
bool remove_duplicates
Definition: access_path.h:1095
const auto & table_value_constructor() const
Definition: access_path.h:644
KEY * key
Definition: access_path.h:1069
auto & table_value_constructor()
Definition: access_path.h:640
auto & stream()
Definition: access_path.h:744
size_t key_len
Definition: access_path.h:1070
const OverflowBitset & subsumed_sargable_join_predicates() const
Definition: access_path.h:451
const auto & zero_rows() const
Definition: access_path.h:660
const auto & append() const
Definition: access_path.h:772
ha_rows offset
Definition: access_path.h:1114
int idx
Definition: access_path.h:846
auto & hash_join()
Definition: access_path.h:672
auto & cache_invalidator()
Definition: access_path.h:816
bool force_sort_rowids
Definition: access_path.h:1098
unsigned num_used_key_parts
Definition: access_path.h:911
unsigned num_ranges
Definition: access_path.h:903
ORDER * order
Definition: access_path.h:1094
auto & sort()
Definition: access_path.h:712
struct AccessPath::@49::@78 sort
auto & fake_single_row()
Definition: access_path.h:648
struct AccessPath::@49::@84 materialize_information_schema_table
struct AccessPath::@49::@91 cache_invalidator
ha_rows limit
Definition: access_path.h:1113
struct AccessPath::@49::@56 full_text_search
auto & follow_tail()
Definition: access_path.h:560
struct AccessPath::@49::@51 index_scan
const JoinPredicate * join_predicate
Definition: access_path.h:1032
const auto & const_table() const
Definition: access_path.h:548
const auto & dynamic_index_range_scan() const
Definition: access_path.h:620
const auto & table_scan() const
Definition: access_path.h:492
unsigned mrr_flags
Definition: access_path.h:905
int ref_slice
Definition: access_path.h:1109
TABLE_LIST * table_list
Definition: access_path.h:1141
OverflowBitset filter_predicates
Bitmap of WHERE predicates that we are including on this access path, referring to the “predicates” a...
Definition: access_path.h:412
bool can_be_used_for_imerge
Definition: access_path.h:921
bool forced_by_hint
Definition: access_path.h:938
auto & limit_offset()
Definition: access_path.h:736
AccessPath * inner
Definition: access_path.h:1031
bool provide_rowid
Definition: access_path.h:1126
const auto & index_range_scan() const
Definition: access_path.h:572
const auto & ref() const
Definition: access_path.h:508
bool forced_by_dbug
Whether this access path is forced preferred over all others by means of a SET DEBUG force_subplan_0x...
Definition: access_path.h:295
Item_func_match * ft_func
Definition: access_path.h:877
struct AccessPath::@49::@70 fake_single_row
ha_rows * send_records_override
Definition: access_path.h:1119
auto & table_scan()
Definition: access_path.h:488
const auto & hash_join() const
Definition: access_path.h:676
QEP_TAB * qep_tab
Definition: access_path.h:995
Table_function * table_function
Definition: access_path.h:999
struct AccessPath::@49::@68 unqualified_count
auto & zero_rows_aggregated()
Definition: access_path.h:664
const auto & sort() const
Definition: access_path.h:716
bool unwrap_rollup
Definition: access_path.h:1096
double init_once_cost
Of init_cost, the portion of the initialization that needs to be done only once per query block.
Definition: access_path.h:378
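A sketch of the relationship these cost members encode, assuming "path" is fully costed: the first scan pays the full cost, while later scans can skip the once-per-query-block part (this difference is what rescan_cost() above is documented to return):

  const double first_scan_cost = path->cost;
  const double later_scan_cost = path->cost - path->init_once_cost;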
struct AccessPath::@49::@61 index_merge
Type
Definition: access_path.h:203
@ FOLLOW_TAIL
Definition: access_path.h:214
@ FILTER
Definition: access_path.h:238
@ PUSHED_JOIN_REF
Definition: access_path.h:210
@ ZERO_ROWS_AGGREGATED
Definition: access_path.h:227
@ AGGREGATE
Definition: access_path.h:240
@ BKA_JOIN
Definition: access_path.h:234
@ ZERO_ROWS
Definition: access_path.h:226
@ CONST_TABLE
Definition: access_path.h:212
@ GROUP_INDEX_SKIP_SCAN
Definition: access_path.h:220
@ INDEX_RANGE_SCAN
Definition: access_path.h:215
@ UNQUALIFIED_COUNT
Definition: access_path.h:229
@ EQ_REF
Definition: access_path.h:209
@ FAKE_SINGLE_ROW
Definition: access_path.h:225
@ MATERIALIZE_INFORMATION_SCHEMA_TABLE
Definition: access_path.h:245
@ WINDOW
Definition: access_path.h:247
@ REF_OR_NULL
Definition: access_path.h:208
@ MATERIALIZE
Definition: access_path.h:244
@ NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL
Definition: access_path.h:233
@ ROWID_UNION
Definition: access_path.h:218
@ INDEX_SKIP_SCAN
Definition: access_path.h:219
@ MRR
Definition: access_path.h:213
@ CACHE_INVALIDATOR
Definition: access_path.h:252
@ INDEX_SCAN
Definition: access_path.h:206
@ TABLE_VALUE_CONSTRUCTOR
Definition: access_path.h:224
@ WEEDOUT
Definition: access_path.h:248
@ MATERIALIZED_TABLE_FUNCTION
Definition: access_path.h:228
@ REMOVE_DUPLICATES_ON_INDEX
Definition: access_path.h:250
@ TABLE_SCAN
Definition: access_path.h:205
@ REF
Definition: access_path.h:207
@ TEMPTABLE_AGGREGATE
Definition: access_path.h:241
@ LIMIT_OFFSET
Definition: access_path.h:242
@ APPEND
Definition: access_path.h:246
@ NESTED_LOOP_JOIN
Definition: access_path.h:232
@ INDEX_MERGE
Definition: access_path.h:216
@ FULL_TEXT_SEARCH
Definition: access_path.h:211
@ ALTERNATIVE
Definition: access_path.h:251
@ STREAM
Definition: access_path.h:243
@ REMOVE_DUPLICATES
Definition: access_path.h:249
@ ROWID_INTERSECTION
Definition: access_path.h:217
@ DYNAMIC_INDEX_RANGE_SCAN
Definition: access_path.h:221
@ DELETE_ROWS
Definition: access_path.h:255
@ SORT
Definition: access_path.h:239
@ HASH_JOIN
Definition: access_path.h:235
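Because the per-type payload lives in a union, consumers must dispatch on the type tag before calling the matching accessor; using the wrong accessor reads an inactive union member. A sketch, with hypothetical visit_* handlers:

  switch (path->type) {
    case AccessPath::TABLE_SCAN:
      visit_table(path->table_scan().table);  // hypothetical
      break;
    case AccessPath::FILTER:
      visit_condition(path->filter().condition);  // hypothetical
      visit_child(path->filter().child);          // hypothetical
      break;
    default:
      break;
  }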
bool retrieve_full_rows
Definition: access_path.h:950
union AccessPath::@49 u
const auto & rowid_union() const
Definition: access_path.h:596
const TABLE * table
Definition: access_path.h:1068
const auto & index_skip_scan() const
Definition: access_path.h:604
auto & dynamic_index_range_scan()
Definition: access_path.h:616
struct AccessPath::@49::@71 zero_rows
const auto & pushed_join_ref() const
Definition: access_path.h:532
auto & remove_duplicates_on_index()
Definition: access_path.h:800
table_map immediate_tables
Definition: access_path.h:1185
int mrr_flags
Definition: access_path.h:887
auto & full_text_search()
Definition: access_path.h:536
const auto & materialize() const
Definition: access_path.h:756
int ordering_state
Which ordering the rows produced by this path follow, if any (see interesting_orders....
Definition: access_path.h:345
struct AccessPath::@49::@83 materialize
auto & eq_ref()
Definition: access_path.h:520
struct AccessPath::@49::@53 ref_or_null
struct AccessPath::@49::@90 alternative
JoinType join_type
Definition: access_path.h:1040
struct AccessPath::@49::@54 eq_ref
const auto & limit_offset() const
Definition: access_path.h:740
MaterializePathParameters * param
Definition: access_path.h:1137
const char * cause
Definition: access_path.h:1022
auto & index_merge()
Definition: access_path.h:576
auto & aggregate()
Definition: access_path.h:720
struct AccessPath::@49::@60 index_range_scan
const auto & group_index_skip_scan() const
Definition: access_path.h:612
auto & zero_rows()
Definition: access_path.h:656
AccessPath * subquery_path
Definition: access_path.h:1105
Mem_root_array< AppendPathParameters > * children
Definition: access_path.h:1145
TABLE_REF * ref
Definition: access_path.h:852
double cost
Expected cost to read all of this access path once; -1.0 for unknown.
Definition: access_path.h:357
const auto & nested_loop_join() const
Definition: access_path.h:692
bool already_expanded_predicates
Definition: access_path.h:1050
const auto & remove_duplicates_on_index() const
Definition: access_path.h:804
Temp_table_param * temp_table_param
Definition: access_path.h:1106
auto & nested_loop_semijoin_with_duplicate_removal()
Definition: access_path.h:696
bool reverse
Definition: access_path.h:848
AccessPath * table_scan_path
Definition: access_path.h:1172
const auto & materialize_information_schema_table() const
Definition: access_path.h:764
bool geometry
Definition: access_path.h:927
struct AccessPath::@49::@67 materialized_table_function
bool count_examined_rows
Whether this access path counts as one that scans a base table, and thus should be counted towards ex...
Definition: access_path.h:290
unsigned loosescan_key_len
Definition: access_path.h:1169
auto & ref()
Definition: access_path.h:504
struct AccessPath::@49::@59 follow_tail
table_map tables_to_delete_from
Definition: access_path.h:1184
IndexSkipScanParameters * param
Definition: access_path.h:982
struct AccessPath::@49::@62 rowid_intersection
const OverflowBitset & applied_sargable_join_predicates() const
Definition: access_path.h:431
Mem_root_array< AccessPath * > * children
Definition: access_path.h:940
OverflowBitset delayed_predicates
Bitmap of WHERE predicates that touch tables we have joined in, but that we could not apply yet (for ...
Definition: access_path.h:440
AccessPath * table_path
Definition: access_path.h:1000
const auto & aggregate() const
Definition: access_path.h:724
TABLE * table
Definition: access_path.h:842
auto & append()
Definition: access_path.h:768
TABLE_REF * used_ref
Definition: access_path.h:1176
const auto & weedout() const
Definition: access_path.h:788
const auto & cache_invalidator() const
Definition: access_path.h:820
bool needs_buffering
Definition: access_path.h:1153
int8_t immediate_update_delete_table
For UPDATE and DELETE statements: The node index of a table which can be updated or deleted from imme...
Definition: access_path.h:339
QUICK_RANGE ** ranges
Definition: access_path.h:902
struct AccessPath::@49::@57 const_table
bool is_unique
Definition: access_path.h:870
const auto & alternative() const
Definition: access_path.h:812
Safety
A general enum to describe the safety of a given operation.
Definition: access_path.h:265
@ SAFE_IF_SCANNED_ONCE
The given operation is safe if this access path is scanned once, but not if it's scanned multiple tim...
Definition: access_path.h:274
@ UNSAFE
The given operation is unsafe on this access path, no matter how many or few times it's scanned.
Definition: access_path.h:280
@ SAFE
The given operation is always safe on this access path.
Definition: access_path.h:267
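A sketch of how a consumer of safe_for_rowid might branch on these values (the actual fallback logic is elided):

  switch (path->safe_for_rowid) {
    case AccessPath::SAFE:
      break;  // row IDs can be used freely
    case AccessPath::SAFE_IF_SCANNED_ONCE:
      // Acceptable only if this query block scans the path a single time.
      break;
    case AccessPath::UNSAFE:
      // Would need to materialize into a temporary table before relying
      // on row IDs (not shown).
      break;
  }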
auto & const_table()
Definition: access_path.h:544
struct AccessPath::@49::@50 table_scan
unsigned mrr_length_per_rec
Definition: access_path.h:1041
const auto & rowid_intersection() const
Definition: access_path.h:588
auto & index_range_scan()
Definition: access_path.h:568
struct AccessPath::@49::@72 zero_rows_aggregated
double num_output_rows_before_filter
If there is no filter, identical to num_output_rows and cost, respectively.
Definition: access_path.h:390
const auto & ref_or_null() const
Definition: access_path.h:516
struct AccessPath::@49::@92 delete_rows
Filesort * filesort
Definition: access_path.h:1089
bool store_rowids
Definition: access_path.h:1034
const auto & nested_loop_semijoin_with_duplicate_removal() const
Definition: access_path.h:700
Definition: access_path.h:161
JOIN * join
Definition: access_path.h:163
AccessPath * path
Definition: access_path.h:162
Definition: group_index_skip_scan_plan.h:44
Logically a part of AccessPath::index_skip_scan(), but is too large, so split out into its own struct...
Definition: index_skip_scan_plan.h:72
A specification that two specific relational expressions (e.g., two tables, or a table and a join bet...
Definition: access_path.h:74
int semijoin_group_size
Definition: access_path.h:107
FunctionalDependencySet functional_dependencies
Definition: access_path.h:91
Mem_root_array< int > functional_dependencies_idx
Definition: access_path.h:96
RelationalExpression * expr
Definition: access_path.h:75
double selectivity
Definition: access_path.h:76
int ordering_idx_needed_for_semijoin_rewrite
Definition: access_path.h:101
Item ** semijoin_group
Definition: access_path.h:106
size_t estimated_bytes_per_row
Definition: access_path.h:80
Definition: range_optimizer.h:54
The MEM_ROOT is a simple arena, where allocations are carved out of larger blocks.
Definition: my_alloc.h:82
Definition: materialize_path_parameters.h:41
int select_number
Definition: materialize_path_parameters.h:43
bool copy_items
Definition: materialize_path_parameters.h:46
Temp_table_param * temp_table_param
Definition: materialize_path_parameters.h:47
AccessPath * subquery_path
Definition: materialize_path_parameters.h:42
bool disable_deduplication_by_hash_field
Definition: materialize_path_parameters.h:45
JOIN * join
Definition: materialize_path_parameters.h:44
Definition: materialize_path_parameters.h:39
bool rematerialize
Definition: materialize_path_parameters.h:56
Common_table_expr * cte
Definition: materialize_path_parameters.h:53
bool reject_multiple_rows
Definition: materialize_path_parameters.h:58
ha_rows limit_rows
Definition: materialize_path_parameters.h:57
Mem_root_array< QueryBlock > query_blocks
Definition: materialize_path_parameters.h:50
TABLE * table
Definition: materialize_path_parameters.h:52
int ref_slice
Definition: materialize_path_parameters.h:55
Query_expression * unit
Definition: materialize_path_parameters.h:54
Mem_root_array< const AccessPath * > * invalidators
Definition: materialize_path_parameters.h:51
Definition: table.h:279
A position of a table within a join order.
Definition: sql_select.h:351
A filter of some sort that is not a join condition (those are stored in JoinPredicate objects).
Definition: access_path.h:115
hypergraph::NodeMap total_eligibility_set
Definition: access_path.h:128
bool was_join_condition
Definition: access_path.h:141
Mem_root_array< int > functional_dependencies_idx
Definition: access_path.h:153
FunctionalDependencySet functional_dependencies
Definition: access_path.h:152
int source_multiple_equality_idx
Definition: access_path.h:149
hypergraph::NodeMap used_nodes
Definition: access_path.h:119
Item * condition
Definition: access_path.h:116
double selectivity
Definition: access_path.h:130
Mem_root_array< ContainedSubquery > contained_subqueries
Definition: access_path.h:158
Represents an expression tree in the relational algebra of joins.
Definition: relational_expression.h:85
Definition: table.h:2684
Definition: sql_opt_exec_shared.h:58
Definition: table.h:1394