MySQL 8.0.33
Source Code Documentation
access_path.h
/* Copyright (c) 2020, 2023, Oracle and/or its affiliates.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License, version 2.0,
   as published by the Free Software Foundation.

   This program is also distributed with certain software (including
   but not limited to OpenSSL) that is licensed under separate terms,
   as designated in a particular file or component or in included license
   documentation. The authors of MySQL hereby grant you an additional
   permission to link the program and your derivative works with the
   separately licensed software that they have included with MySQL.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License, version 2.0, for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
22
23#ifndef SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
24#define SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
25
26#include <assert.h>
27#include <stdint.h>
28
29#include <string>
30#include <type_traits>
31#include <vector>
32
39#include "sql/join_type.h"
40#include "sql/mem_root_array.h"
41#include "sql/sql_array.h"
42#include "sql/sql_class.h"
43#include "sql/table.h"
44
template <class T>
class Bounds_checked_array;
class Common_table_expr;
class Filesort;
class Item;
class Item_func_match;
class JOIN;
class KEY;
class QEP_TAB;
class QUICK_RANGE;
class SJ_TMP_TABLE;
class Table_function;
class Temp_table_param;
class Window;
struct AccessPath;
struct GroupIndexSkipScanParameters;
struct IndexSkipScanParameters;
struct Index_lookup;
struct KEY_PART;
struct ORDER;
struct POSITION;
struct RelationalExpression;
struct TABLE;

/**
  A specification that two specific relational expressions
  (e.g., two tables, or a table and a join between two other tables)
  should be joined together. The actual join conditions, if any,
  live inside the “expr” object, as does the join type etc.
 */
struct JoinPredicate {
  RelationalExpression *expr = nullptr;
  double selectivity;

  // If this join is made using a hash join, estimates the width
  // of each row as stored in the hash table, in bytes.
  size_t estimated_bytes_per_row;

  // The set of (additional) functional dependencies that are active
  // after this join predicate has been applied. E.g. if we're joining
  // on t1.x = t2.x, there will be a bit for that functional dependency.
  // We don't currently support more complex join conditions, but there's
  // no conceptual reason why we couldn't, e.g. a join on a = b + c
  // could give rise to the FD {b, c} → a and possibly even {a, b} → c
  // or {a, c} → b.
  //
  // Used in the processing of interesting orders.
  FunctionalDependencySet functional_dependencies;

  // A less compact form of functional_dependencies, used during building
  // (FunctionalDependencySet bitmaps are only available after all functional
  // indexes have been collected and Build() has been called).
  Mem_root_array<int> functional_dependencies_idx;

  // If this is a suitable semijoin: Contains the grouping given by the
  // join key. If the rows are in this grouping, then the join optimizer will
  // consider deduplicating on it and inverting the join. -1 otherwise.
  int ordering_idx_needed_for_semijoin_rewrite = -1;

  // Same as ordering_idx_needed_for_semijoin_rewrite, but given to the
  // RemoveDuplicatesIterator for doing the actual grouping. Allocated
  // on the MEM_ROOT. Can be empty, in which case a LIMIT 1 would do.
  Item **semijoin_group = nullptr;
  int semijoin_group_size = 0;
};

/**
  A filter of some sort that is not a join condition (those are stored
  in JoinPredicate objects). AND conditions are typically split up into
  multiple Predicates.
 */
struct Predicate {
  Item *condition;

  // condition->used_tables(), converted to a NodeMap.
  hypergraph::NodeMap used_nodes;

  // tables referred to by the condition, plus any tables whose values
  // can null any of those tables. (Even when reordering outer joins,
  // at least one of those tables will still be present on the
  // left-hand side of the outer join, so this is sufficient.)
  //
  // As a special case, we allow setting RAND_TABLE_BIT, even though it
  // is normally part of a table_map, not a NodeMap.
  hypergraph::NodeMap total_eligibility_set;

  double selectivity;

  // Whether this predicate is a join condition after all; it was promoted
  // to a WHERE predicate since it was part of a cycle (see the comment in
  // AddCycleEdges()). If it is, it is usually ignored so that we don't
  // double-apply join conditions -- but if the join in question was not
  // applied (because the cycle was broken at this point), the predicate
  // would come into play. This is normally registered on the join itself
  // (see RelationalExpression::join_predicate_bitmap), but having the bit
  // on the predicate itself is used to avoid trying to push it down as a
  // sargable predicate.
  bool was_join_condition = false;

  // If this is a join condition that came from a multiple equality,
  // and we have decided to create a mesh from that multiple equality,
  // returns the index of it into the “multiple_equalities” array
  // in MakeJoinHypergraph(). (You don't actually need the array to
  // use this; it's just an opaque index to deduplicate between different
  // predicates.) Otherwise, -1.
  int source_multiple_equality_idx = -1;

  // See the equivalent fields in JoinPredicate.
  FunctionalDependencySet functional_dependencies;
  Mem_root_array<int> functional_dependencies_idx;

  // The list of all subqueries referred to in this predicate, if any.
  // The optimizer uses this to add their materialized/non-materialized
  // costs when evaluating filters.
  Mem_root_array<ContainedSubquery> contained_subqueries;
};

struct AppendPathParameters {
  AccessPath *path;
  JOIN *join;
};

/// To indicate that a row estimate is not yet made.
constexpr double kUnknownRowCount = -1.0;

/**
  Access paths are a query planning structure that correspond 1:1 to iterators,
  in that an access path contains pretty much exactly the information
  needed to instantiate the given iterator, plus some information that is only
  needed during planning, such as costs. (The new join optimizer will extend
  this somewhat in the future. Some iterators also need the query block,
  ie., JOIN object, they are part of, but that is implicitly available when
  constructing the tree.)

  AccessPath objects build on a variant, ie., they can hold an access path of
  any type (table scan, filter, hash join, sort, etc.), although only one at
  the same time. Currently, they contain 32 bytes of base information that is
  common to any access path (type identifier, costs, etc.), and then up to 40
  bytes that is type-specific (e.g. for a table scan, the TABLE object). It
  would be nice if we could squeeze it down to 64 and fit a cache line exactly,
  but it does not seem to be easy without fairly large contortions.

  We could have solved this by inheritance, but the fixed-size design makes it
  possible to replace an access path when a better one is found, without
  introducing a new allocation, which will be important when using them as a
  planning structure.
 */
struct AccessPath {
  // Most of the members are declared with initializers, but bit fields cannot
  // be declared with initializers until C++20, so we need to define a
  // constructor to initialize those fields.
  // TODO(khatlen): When we move to C++20, add initializers to the bit fields
  // too, and use the default constructor generated by the compiler.
  AccessPath()
      : count_examined_rows(false)
#ifndef NDEBUG
        ,
        forced_by_dbug(false)
#endif
  {
  }

  enum Type : uint8_t {
    // Basic access paths (those with no children, at least nominally).
    // NOTE: When adding more paths to this section, also update GetBasicTable()
    // to handle them.
    TABLE_SCAN,
    INDEX_SCAN,
    REF,
    REF_OR_NULL,
    EQ_REF,
    PUSHED_JOIN_REF,
    FULL_TEXT_SEARCH,
    CONST_TABLE,
    MRR,
    FOLLOW_TAIL,
    INDEX_RANGE_SCAN,
    INDEX_MERGE,
    ROWID_INTERSECTION,
    ROWID_UNION,
    INDEX_SKIP_SCAN,
    GROUP_INDEX_SKIP_SCAN,
    DYNAMIC_INDEX_RANGE_SCAN,

    // Basic access paths that don't correspond to a specific table.
    TABLE_VALUE_CONSTRUCTOR,
    FAKE_SINGLE_ROW,
    ZERO_ROWS,
    ZERO_ROWS_AGGREGATED,
    MATERIALIZED_TABLE_FUNCTION,
    UNQUALIFIED_COUNT,

    // Joins.
    NESTED_LOOP_JOIN,
    NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL,
    BKA_JOIN,
    HASH_JOIN,

    // Composite access paths.
    FILTER,
    SORT,
    AGGREGATE,
    TEMPTABLE_AGGREGATE,
    LIMIT_OFFSET,
    STREAM,
    MATERIALIZE,
    MATERIALIZE_INFORMATION_SCHEMA_TABLE,
    APPEND,
    WINDOW,
    WEEDOUT,
    REMOVE_DUPLICATES,
    REMOVE_DUPLICATES_ON_INDEX,
    ALTERNATIVE,
    CACHE_INVALIDATOR,

    // Access paths that modify tables.
    DELETE_ROWS,
    UPDATE_ROWS,
  } type;

  /// A general enum to describe the safety of a given operation.
  /// Currently we only use this to describe row IDs, but it can easily
  /// be reused for safety of updating a table we're reading from
  /// (the Halloween problem), or just generally unreproducible results
  /// (e.g. a TABLESAMPLE changing due to external factors).
  ///
  /// Less safe values have higher numerical values.
  enum Safety : uint8_t {
    /// The given operation is always safe on this access path.
    SAFE = 0,

    /// The given operation is safe if this access path is scanned once,
    /// but not if it's scanned multiple times (e.g. used on the inner side
    /// of a nested-loop join). A typical example of this is a derived table
    /// or CTE that is rematerialized on each scan, so that references to
    /// the old values (such as row IDs) are no longer valid.
    SAFE_IF_SCANNED_ONCE = 1,

    /// The given operation is unsafe on this access path, no matter how many
    /// or few times it's scanned. Often, it may help to materialize it
    /// (assuming the materialization itself doesn't use the operation
    /// in question).
    UNSAFE = 2
  };

  /// Whether it is safe to get row IDs (for sorting) from this access path.
  Safety safe_for_rowid = SAFE;

  /// Whether this access path counts as one that scans a base table,
  /// and thus should be counted towards examined_rows. It can sometimes
  /// seem a bit arbitrary which iterators count towards examined_rows
  /// and which ones do not, so the only canonical reference is the tests.
  bool count_examined_rows : 1;

#ifndef NDEBUG
  /// Whether this access path is forced preferred over all others by means
  /// of a SET DEBUG force_subplan_0x... statement.
  bool forced_by_dbug : 1;
#endif

  /// For UPDATE and DELETE statements: The node index of a table which can be
  /// updated or deleted from immediately as the rows are read from the
  /// iterator, if this path is only read from once. -1 if there is no such
  /// table in this path.
  ///
  /// Note that this is an index into CostingReceiver's array of nodes, and is
  /// not necessarily equal to the table number within the query block given by
  /// Table_ref::tableno().
  ///
  /// The table, if any, is currently always the outermost table in the path.
  ///
  /// It is possible to have plans where it would be safe to operate
  /// "immediately" on more than one table. For example, if we do a merge join,
  /// it is safe to perform immediate deletes on tables on the inner side of
  /// the join, since both sides are read only once. (However, we currently do
  /// not support merge joins.)
  ///
  /// Another possibility is when the outer table of a nested loop join is
  /// guaranteed to return at most one row (typically, a unique index lookup
  /// aka. eq_ref). Then it's safe to delete immediately from both sides of the
  /// nested loop join. But we don't do this yet.
  ///
  /// Hash joins read both sides exactly once. However, with hash joins, the
  /// scans on the inner tables are not positioned on the correct row when the
  /// result of the join is returned, so the immediate delete logic will need
  /// to be changed to reposition the underlying scans before doing the
  /// immediate deletes. While this can be done, it makes the benefit of
  /// immediate deletes less obvious for these tables, and it can also be a
  /// loss in some cases, because we lose the deduplication provided by the
  /// Unique object used for buffered deletes (the immediate deletes could end
  /// up spending time repositioning to already deleted rows). So we currently
  /// don't attempt to do immediate deletes from inner tables of hash joins
  /// either.
  ///
  /// The outer table of a hash join can be deleted from immediately if the
  /// inner table fits in memory. If the hash join spills to disk, though,
  /// neither the rows of the outer table nor the rows of the inner table come
  /// out in the order of the underlying scan, so it is not safe in general to
  /// perform immediate deletes on the outer table of a hash join.
  ///
  /// If support for immediate operations on multiple tables is added,
  /// this member could be changed from a node index to a NodeMap.
  int immediate_update_delete_table{-1};

  /// Which ordering the rows produced by this path follow, if any
  /// (see interesting_orders.h). This is really a LogicalOrderings::StateIndex,
  /// but we don't want to add a dependency on interesting_orders.h from
  /// this file, so we use the base type instead of the typedef here.
  int ordering_state = 0;

  /// If an iterator has been instantiated for this access path, points to the
  /// iterator. Used for constructing iterators that need to talk to each other
  /// (e.g. for recursive CTEs, or BKA join), and also for locating timing
  /// information in EXPLAIN ANALYZE queries.
  RowIterator *iterator = nullptr;

  /// Expected cost to read all of this access path once; -1.0 for unknown.
  double cost{-1.0};

  /// Expected cost to initialize this access path; ie., cost to read
  /// k out of N rows would be init_cost + (k/N) * (cost - init_cost).
  /// Note that EXPLAIN prints out cost of reading the _first_ row
  /// because it is easier for the user and also easier to measure in
  /// EXPLAIN ANALYZE, but it is easier to do calculations with a pure
  /// initialization cost, so that is what we use in this member.
  /// -1.0 for unknown.
  double init_cost{-1.0};

  /// Of init_cost, how much of the initialization needs only to be done
  /// once per query block. (This is a cost, not a proportion.)
  /// Ie., if the access path can reuse some of its initialization work
  /// if Init() is called multiple times, this member will be nonzero.
  /// A typical example is a materialized table with rematerialize=false;
  /// the second time Init() is called, it's a no-op. Most paths will have
  /// init_once_cost = 0.0, ie., repeated scans will cost the same.
  /// We do not intend to use this field to model cache effects.
  ///
  /// This is currently not printed in EXPLAIN, only optimizer trace.
  double init_once_cost{0.0};

  /// Return the cost of scanning the given path for the second time
  /// (or later) in the given query block. This is really the interesting
  /// metric, not init_once_cost in itself, but since nearly all paths
  /// have zero init_once_cost, storing that instead allows us to skip
  /// a lot of repeated path->init_once_cost = path->init_cost calls
  /// in the code.
  double rescan_cost() const { return cost - init_once_cost; }
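  // Illustrative worked example (added; not part of the original source):
  // with cost = 110.0, init_cost = 10.0 and init_once_cost = 8.0, reading
  // k out of N rows costs 10.0 + (k/N) * 100.0 on the first scan, while a
  // full rescan costs rescan_cost() = 110.0 - 8.0 = 102.0, because the
  // init-once part of the initialization is not repeated.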

  /// If no filter, identical to num_output_rows, cost, respectively.
  /// init_cost is always the same (filters have zero initialization cost).
  double num_output_rows_before_filter{kUnknownRowCount},
      cost_before_filter{-1.0};

  /// Bitmap of WHERE predicates that we are including on this access path,
  /// referring to the “predicates” array internal to the join optimizer.
  /// Since bit masks are much cheaper to deal with than creating Item
  /// objects, and we don't invent new conditions during join optimization
  /// (all of them are known when we begin optimization), we stick to
  /// manipulating bit masks during optimization, saying which filters will be
  /// applied at this node (a 1-bit means the filter will be applied here; if
  /// there are multiple ones, they are ANDed together).
  ///
  /// This is used during join optimization only; before iterators are
  /// created, we will add FILTER access paths to represent these instead,
  /// removing the dependency on the array. Said FILTER paths are by
  /// convention created with materialize_subqueries = false, since the by far
  /// most common case is that there are no subqueries in the predicate.
  /// In other words, if you wish to represent a filter with
  /// materialize_subqueries = true, you will need to make an explicit FILTER
  /// node.
  ///
  /// See also nested_loop_join().equijoin_predicates, which is for filters
  /// being applied _before_ nested-loop joins, but is otherwise the same idea.
  OverflowBitset filter_predicates{0};

  /// Bitmap of sargable join predicates that have already been applied
  /// in this access path by means of an index lookup (ref access),
  /// again referring to “predicates”, and thus should not be counted again
  /// for selectivity. Note that the filter may need to be applied
  /// nevertheless (especially in case of type conversions); see
  /// subsumed_sargable_join_predicates.
  ///
  /// Since these refer to the same array as filter_predicates, they will
  /// never overlap with filter_predicates, and so we can reuse the same
  /// memory using an alias (a union would not be allowed, since OverflowBitset
  /// is a class with non-trivial default constructor), even though the meaning
  /// is entirely separate. If N = num_where_predicates in the hypergraph, then
  /// bits 0..(N-1) belong to filter_predicates, and the rest to
  /// applied_sargable_join_predicates.
  OverflowBitset &applied_sargable_join_predicates() {
    return filter_predicates;
  }
  const OverflowBitset &applied_sargable_join_predicates() const {
    return filter_predicates;
  }
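  // Illustrative example (added; not part of the original source): if
  // num_where_predicates = 3 in the hypergraph, bits 0..2 of this bitset are
  // read as filter_predicates (WHERE predicates to apply at this node), while
  // a set bit 3 or higher, read through applied_sargable_join_predicates(),
  // means that the corresponding sargable join predicate has already been
  // applied through an index lookup on this path.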

  /// Bitmap of WHERE predicates that touch tables we have joined in,
  /// but that we could not apply yet (for instance because they reference
  /// other tables, or because we could not push them down into
  /// the nullable side of outer joins). Used during planning only
  /// (see filter_predicates).
  OverflowBitset delayed_predicates{0};

  /// Similar to applied_sargable_join_predicates, bitmap of sargable
  /// join predicates that have been applied and will subsume the join
  /// predicate entirely, ie., not only should the selectivity not be
  /// double-counted, but the predicate itself is redundant and need not
  /// be applied as a filter. (It is an error to have a bit set here but not
  /// in applied_sargable_join_predicates.)
  OverflowBitset &subsumed_sargable_join_predicates() {
    return delayed_predicates;
  }
  const OverflowBitset &subsumed_sargable_join_predicates() const {
    return delayed_predicates;
  }

  /// If nonzero, a bitmap of other tables whose joined-in rows must already be
  /// loaded when rows from this access path are evaluated; that is, this
  /// access path must be put on the inner side of a nested-loop join (or
  /// multiple such joins) where the outer side includes all of the given
  /// tables.
  ///
  /// The most obvious case for this is dependent tables in LATERAL, but a more
  /// common case is when we have pushed join conditions referring to those
  /// tables; e.g., if this access path represents t1 and we have a condition
  /// t1.x=t2.x that is pushed down into an index lookup (ref access), t2 will
  /// be set in this bitmap. We can still join in other tables, deferring t2,
  /// but the bit(s) will then propagate, and we cannot be on the right side of
  /// a hash join until parameter_tables is zero again. (Also see
  /// DisallowParameterizedJoinPath() for when we disallow such deferring,
  /// as an optimization.)
  ///
  /// As a special case, we allow setting RAND_TABLE_BIT, even though it
  /// is normally part of a table_map, not a NodeMap. In this case, it specifies
  /// that the access path is entirely noncachable, because it depends on
  /// something nondeterministic or an outer reference, and thus can never be on
  /// the right side of a hash join, ever.
  hypergraph::NodeMap parameter_tables{0};

  /// Auxiliary data used by a secondary storage engine while processing the
  /// access path during optimization and execution. The secondary storage
  /// engine is free to store any useful information in this member, for example
  /// extra statistics or cost estimates. The data pointed to is fully owned by
  /// the secondary storage engine, and it is the responsibility of the
  /// secondary engine to manage the memory and make sure it is properly
  /// destroyed.
  void *secondary_engine_data{nullptr};

  // Accessors for the union below.
  auto &table_scan() {
    assert(type == TABLE_SCAN);
    return u.table_scan;
  }
  const auto &table_scan() const {
    assert(type == TABLE_SCAN);
    return u.table_scan;
  }
  auto &index_scan() {
    assert(type == INDEX_SCAN);
    return u.index_scan;
  }
  const auto &index_scan() const {
    assert(type == INDEX_SCAN);
    return u.index_scan;
  }
  auto &ref() {
    assert(type == REF);
    return u.ref;
  }
  const auto &ref() const {
    assert(type == REF);
    return u.ref;
  }
  auto &ref_or_null() {
    assert(type == REF_OR_NULL);
    return u.ref_or_null;
  }
  const auto &ref_or_null() const {
    assert(type == REF_OR_NULL);
    return u.ref_or_null;
  }
  auto &eq_ref() {
    assert(type == EQ_REF);
    return u.eq_ref;
  }
  const auto &eq_ref() const {
    assert(type == EQ_REF);
    return u.eq_ref;
  }
  auto &pushed_join_ref() {
    assert(type == PUSHED_JOIN_REF);
    return u.pushed_join_ref;
  }
  const auto &pushed_join_ref() const {
    assert(type == PUSHED_JOIN_REF);
    return u.pushed_join_ref;
  }
  auto &full_text_search() {
    assert(type == FULL_TEXT_SEARCH);
    return u.full_text_search;
  }
  const auto &full_text_search() const {
    assert(type == FULL_TEXT_SEARCH);
    return u.full_text_search;
  }
  auto &const_table() {
    assert(type == CONST_TABLE);
    return u.const_table;
  }
  const auto &const_table() const {
    assert(type == CONST_TABLE);
    return u.const_table;
  }
  auto &mrr() {
    assert(type == MRR);
    return u.mrr;
  }
  const auto &mrr() const {
    assert(type == MRR);
    return u.mrr;
  }
  auto &follow_tail() {
    assert(type == FOLLOW_TAIL);
    return u.follow_tail;
  }
  const auto &follow_tail() const {
    assert(type == FOLLOW_TAIL);
    return u.follow_tail;
  }
  auto &index_range_scan() {
    assert(type == INDEX_RANGE_SCAN);
    return u.index_range_scan;
  }
  const auto &index_range_scan() const {
    assert(type == INDEX_RANGE_SCAN);
    return u.index_range_scan;
  }
  auto &index_merge() {
    assert(type == INDEX_MERGE);
    return u.index_merge;
  }
  const auto &index_merge() const {
    assert(type == INDEX_MERGE);
    return u.index_merge;
  }
  auto &rowid_intersection() {
    assert(type == ROWID_INTERSECTION);
    return u.rowid_intersection;
  }
  const auto &rowid_intersection() const {
    assert(type == ROWID_INTERSECTION);
    return u.rowid_intersection;
  }
  auto &rowid_union() {
    assert(type == ROWID_UNION);
    return u.rowid_union;
  }
  const auto &rowid_union() const {
    assert(type == ROWID_UNION);
    return u.rowid_union;
  }
  auto &index_skip_scan() {
    assert(type == INDEX_SKIP_SCAN);
    return u.index_skip_scan;
  }
  const auto &index_skip_scan() const {
    assert(type == INDEX_SKIP_SCAN);
    return u.index_skip_scan;
  }
  auto &group_index_skip_scan() {
    assert(type == GROUP_INDEX_SKIP_SCAN);
    return u.group_index_skip_scan;
  }
  const auto &group_index_skip_scan() const {
    assert(type == GROUP_INDEX_SKIP_SCAN);
    return u.group_index_skip_scan;
  }
  auto &dynamic_index_range_scan() {
    assert(type == DYNAMIC_INDEX_RANGE_SCAN);
    return u.dynamic_index_range_scan;
  }
  const auto &dynamic_index_range_scan() const {
    assert(type == DYNAMIC_INDEX_RANGE_SCAN);
    return u.dynamic_index_range_scan;
  }
  auto &materialized_table_function() {
    assert(type == MATERIALIZED_TABLE_FUNCTION);
    return u.materialized_table_function;
  }
  const auto &materialized_table_function() const {
    assert(type == MATERIALIZED_TABLE_FUNCTION);
    return u.materialized_table_function;
  }
  auto &unqualified_count() {
    assert(type == UNQUALIFIED_COUNT);
    return u.unqualified_count;
  }
  const auto &unqualified_count() const {
    assert(type == UNQUALIFIED_COUNT);
    return u.unqualified_count;
  }
  auto &table_value_constructor() {
    assert(type == TABLE_VALUE_CONSTRUCTOR);
    return u.table_value_constructor;
  }
  const auto &table_value_constructor() const {
    assert(type == TABLE_VALUE_CONSTRUCTOR);
    return u.table_value_constructor;
  }
  auto &fake_single_row() {
    assert(type == FAKE_SINGLE_ROW);
    return u.fake_single_row;
  }
  const auto &fake_single_row() const {
    assert(type == FAKE_SINGLE_ROW);
    return u.fake_single_row;
  }
  auto &zero_rows() {
    assert(type == ZERO_ROWS);
    return u.zero_rows;
  }
  const auto &zero_rows() const {
    assert(type == ZERO_ROWS);
    return u.zero_rows;
  }
  auto &zero_rows_aggregated() {
    assert(type == ZERO_ROWS_AGGREGATED);
    return u.zero_rows_aggregated;
  }
  const auto &zero_rows_aggregated() const {
    assert(type == ZERO_ROWS_AGGREGATED);
    return u.zero_rows_aggregated;
  }
  auto &hash_join() {
    assert(type == HASH_JOIN);
    return u.hash_join;
  }
  const auto &hash_join() const {
    assert(type == HASH_JOIN);
    return u.hash_join;
  }
  auto &bka_join() {
    assert(type == BKA_JOIN);
    return u.bka_join;
  }
  const auto &bka_join() const {
    assert(type == BKA_JOIN);
    return u.bka_join;
  }
  auto &nested_loop_join() {
    assert(type == NESTED_LOOP_JOIN);
    return u.nested_loop_join;
  }
  const auto &nested_loop_join() const {
    assert(type == NESTED_LOOP_JOIN);
    return u.nested_loop_join;
  }
  auto &nested_loop_semijoin_with_duplicate_removal() {
    assert(type == NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL);
    return u.nested_loop_semijoin_with_duplicate_removal;
  }
  const auto &nested_loop_semijoin_with_duplicate_removal() const {
    assert(type == NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL);
    return u.nested_loop_semijoin_with_duplicate_removal;
  }
  auto &filter() {
    assert(type == FILTER);
    return u.filter;
  }
  const auto &filter() const {
    assert(type == FILTER);
    return u.filter;
  }
  auto &sort() {
    assert(type == SORT);
    return u.sort;
  }
  const auto &sort() const {
    assert(type == SORT);
    return u.sort;
  }
  auto &aggregate() {
    assert(type == AGGREGATE);
    return u.aggregate;
  }
  const auto &aggregate() const {
    assert(type == AGGREGATE);
    return u.aggregate;
  }
  auto &temptable_aggregate() {
    assert(type == TEMPTABLE_AGGREGATE);
    return u.temptable_aggregate;
  }
  const auto &temptable_aggregate() const {
    assert(type == TEMPTABLE_AGGREGATE);
    return u.temptable_aggregate;
  }
  auto &limit_offset() {
    assert(type == LIMIT_OFFSET);
    return u.limit_offset;
  }
  const auto &limit_offset() const {
    assert(type == LIMIT_OFFSET);
    return u.limit_offset;
  }
  auto &stream() {
    assert(type == STREAM);
    return u.stream;
  }
  const auto &stream() const {
    assert(type == STREAM);
    return u.stream;
  }
  auto &materialize() {
    assert(type == MATERIALIZE);
    return u.materialize;
  }
  const auto &materialize() const {
    assert(type == MATERIALIZE);
    return u.materialize;
  }
  auto &materialize_information_schema_table() {
    assert(type == MATERIALIZE_INFORMATION_SCHEMA_TABLE);
    return u.materialize_information_schema_table;
  }
  const auto &materialize_information_schema_table() const {
    assert(type == MATERIALIZE_INFORMATION_SCHEMA_TABLE);
    return u.materialize_information_schema_table;
  }
  auto &append() {
    assert(type == APPEND);
    return u.append;
  }
  const auto &append() const {
    assert(type == APPEND);
    return u.append;
  }
  auto &window() {
    assert(type == WINDOW);
    return u.window;
  }
  const auto &window() const {
    assert(type == WINDOW);
    return u.window;
  }
  auto &weedout() {
    assert(type == WEEDOUT);
    return u.weedout;
  }
  const auto &weedout() const {
    assert(type == WEEDOUT);
    return u.weedout;
  }
  auto &remove_duplicates() {
    assert(type == REMOVE_DUPLICATES);
    return u.remove_duplicates;
  }
  const auto &remove_duplicates() const {
    assert(type == REMOVE_DUPLICATES);
    return u.remove_duplicates;
  }
  auto &remove_duplicates_on_index() {
    assert(type == REMOVE_DUPLICATES_ON_INDEX);
    return u.remove_duplicates_on_index;
  }
  const auto &remove_duplicates_on_index() const {
    assert(type == REMOVE_DUPLICATES_ON_INDEX);
    return u.remove_duplicates_on_index;
  }
  auto &alternative() {
    assert(type == ALTERNATIVE);
    return u.alternative;
  }
  const auto &alternative() const {
    assert(type == ALTERNATIVE);
    return u.alternative;
  }
  auto &cache_invalidator() {
    assert(type == CACHE_INVALIDATOR);
    return u.cache_invalidator;
  }
  const auto &cache_invalidator() const {
    assert(type == CACHE_INVALIDATOR);
    return u.cache_invalidator;
  }
  auto &delete_rows() {
    assert(type == DELETE_ROWS);
    return u.delete_rows;
  }
  const auto &delete_rows() const {
    assert(type == DELETE_ROWS);
    return u.delete_rows;
  }
  auto &update_rows() {
    assert(type == UPDATE_ROWS);
    return u.update_rows;
  }
  const auto &update_rows() const {
    assert(type == UPDATE_ROWS);
    return u.update_rows;
  }

  double num_output_rows() const { return m_num_output_rows; }

  void set_num_output_rows(double val) { m_num_output_rows = val; }

 private:
  /// Expected number of output rows, -1.0 for unknown.
  double m_num_output_rows{kUnknownRowCount};

  // We'd prefer if this could be an std::variant, but we don't have C++17 yet.
  // It is private to force all access to be through the type-checking
  // accessors.
  //
  // For information about the meaning of each value, see the corresponding
  // row iterator constructors.
  union {
    struct {
      TABLE *table;
    } table_scan;
    struct {
      TABLE *table;
      int idx;
      bool use_order;
      bool reverse;
    } index_scan;
    struct {
      TABLE *table;
      Index_lookup *ref;
      bool use_order;
      bool reverse;
    } ref;
    struct {
      TABLE *table;
      Index_lookup *ref;
      bool use_order;
    } ref_or_null;
    struct {
      TABLE *table;
      Index_lookup *ref;
    } eq_ref;
    struct {
      TABLE *table;
      Index_lookup *ref;
      bool use_order;
      bool is_unique;
    } pushed_join_ref;
    struct {
      TABLE *table;
      Index_lookup *ref;
      bool use_order;
      bool use_limit;
      Item_func_match *ft_func;
    } full_text_search;
    struct {
      TABLE *table;
      Index_lookup *ref;
    } const_table;
    struct {
      TABLE *table;
      Index_lookup *ref;
      AccessPath *bka_path;
      int mrr_flags;
      bool keep_current_rowid;
    } mrr;
    struct {
      TABLE *table;
    } follow_tail;
    struct {
      // The key part(s) we are scanning on. Note that this may be an array.
      // You can get the table we are working on by looking into
      // used_key_parts[0].field->table (it is not stored directly, to avoid
      // going over the AccessPath size limits).
      KEY_PART *used_key_part;

      // The actual ranges we are scanning over (originally derived from “key”).
      // Not a Bounds_checked_array, to save 4 bytes on the length.
      QUICK_RANGE **ranges;
      unsigned num_ranges;

      unsigned mrr_flags;
      unsigned mrr_buf_size;

      // Which index (in the TABLE) we are scanning over, and how many of its
      // key parts we are using.
      unsigned index;
      unsigned num_used_key_parts;

      // If true, the scan can return rows in rowid order.
      bool can_be_used_for_ror : 1;

      // If true, the scan _should_ return rows in rowid order.
      // Should only be set if can_be_used_for_ror == true.
      bool need_rows_in_rowid_order : 1;

      // If true, this plan can be used for index merge scan.
      bool can_be_used_for_imerge : 1;

      // See row intersection for more details.
      bool reuse_handler : 1;

      // Whether we are scanning over a geometry key part.
      bool geometry : 1;

      // Whether we need a reverse scan. Only supported if geometry == false.
      bool reverse : 1;

      // For a reverse scan, if we are using extended key parts. It is needed,
      // to set correct flags when retrieving records.
      bool using_extended_key_parts : 1;
    } index_range_scan;
    struct {
      TABLE *table;
      bool forced_by_hint;
      bool allow_clustered_primary_key_scan;
      Mem_root_array<AccessPath *> *children;
    } index_merge;
    struct {
      TABLE *table;
      Mem_root_array<AccessPath *> *children;

      // Clustered primary key scan, if any.
      AccessPath *cpk_child;

      bool forced_by_hint;
      bool retrieve_full_rows;
      bool need_rows_in_rowid_order;

      // If true, the first child scan should reuse table->file instead of
      // creating its own. This is true if the intersection is the topmost
      // range scan, but _not_ if it's below a union. (The reasons for this
      // are unknown.) It can also be negated by logic involving
      // retrieve_full_rows and is_covering, again for unknown reasons.
      //
      // This is not only for performance; multi-table delete has a hidden
      // dependency on this behavior when running against certain types of
      // tables (e.g. MyISAM), as it assumes table->file is correctly positioned
      // when deleting (and not all table types can transfer the position of one
      // handler to another by using position()).
      bool reuse_handler;

      // true if no row retrieval phase is necessary.
      bool is_covering;
    } rowid_intersection;
    struct {
      TABLE *table;
      Mem_root_array<AccessPath *> *children;
      bool forced_by_hint;
    } rowid_union;
    struct {
      TABLE *table;
      unsigned index;
      unsigned num_used_key_parts;
      bool forced_by_hint;

      // Large, and has nontrivial destructors, so split out into
      // its own allocation.
      IndexSkipScanParameters *param;
    } index_skip_scan;
    struct {
      TABLE *table;
      unsigned index;
      unsigned num_used_key_parts;
      bool forced_by_hint;

      // Large, so split out into its own allocation.
      GroupIndexSkipScanParameters *param;
    } group_index_skip_scan;
    struct {
      TABLE *table;
      QEP_TAB *qep_tab;  // Used only for buffering.
    } dynamic_index_range_scan;
    struct {
      TABLE *table;
      Table_function *table_function;
      AccessPath *table_path;
    } materialized_table_function;
    struct {
      // No members.
    } unqualified_count;

    struct {
      // No members (implicit from the JOIN).
    } table_value_constructor;
    struct {
      // No members.
    } fake_single_row;
    struct {
      // The child is optional. It is only used for keeping track of which
      // tables are pruned away by this path, and it is only needed when this
      // path is on the inner side of an outer join. See ZeroRowsIterator for
      // details. The child of a ZERO_ROWS access path will not be visited by
      // WalkAccessPaths(). It will be visited by WalkTablesUnderAccessPath()
      // only if called with include_pruned_tables = true. No iterator is
      // created for the child, and the child is not shown by EXPLAIN.
      AccessPath *child;

      // Used for EXPLAIN only.
      // TODO(sgunders): make an enum.
      const char *cause;
    } zero_rows;
    struct {
      // Used for EXPLAIN only.
      // TODO(sgunders): make an enum.
      const char *cause;
    } zero_rows_aggregated;

    struct {
      AccessPath *outer, *inner;
      const JoinPredicate *join_predicate;
      bool allow_spill_to_disk;
      bool store_rowids;  // Whether we are below a weedout or not.
      bool rewrite_semi_to_inner;
      table_map tables_to_get_rowid_for;
    } hash_join;
    struct {
      AccessPath *outer, *inner;
      JoinType join_type;
      unsigned mrr_length_per_rec;
      float rec_per_key;
      bool store_rowids;  // Whether we are below a weedout or not.
      table_map tables_to_get_rowid_for;
    } bka_join;
    struct {
      AccessPath *outer, *inner;
      JoinType join_type;  // Somewhat redundant wrt. join_predicate.
      bool pfs_batch_mode;
      bool already_expanded_predicates;
      const JoinPredicate *join_predicate;

      // Equijoin filters to apply before the join, if any.
      // Indexes into join_predicate->expr->equijoin_conditions.
      // Non-equijoin conditions are always applied.
      // If already_expanded_predicates is true, do not re-expand.
      OverflowBitset equijoin_predicates;

      // NOTE: Due to the nontrivial constructor on equijoin_predicates,
      // this struct needs an initializer, or the union would not be
      // default-constructible. If we need more than one union member
      // with such an initializer, we would probably need to change
      // equijoin_predicates into a uint64_t type-punned to an OverflowBitset.
    } nested_loop_join = {nullptr, nullptr, JoinType::INNER, false, false,
                          nullptr, {}};
    struct {
      AccessPath *outer, *inner;
      const TABLE *table;
      KEY *key;
      size_t key_len;
    } nested_loop_semijoin_with_duplicate_removal;

    struct {
      AccessPath *child;
      Item *condition;

      // This parameter, unlike nearly all others, is not passed to the
      // actual iterator. Instead, if true, it signifies that when creating
      // the iterator, all materializable subqueries in “condition” should be
      // materialized (with any in2exists condition removed first). In the
      // very rare case that there are two or more such subqueries, this is
      // an all-or-nothing decision, for simplicity.
      //
      // See FinalizeMaterializedSubqueries().
      bool materialize_subqueries;
    } filter;
    struct {
      AccessPath *child;
      Filesort *filesort;
      table_map tables_to_get_rowid_for;

      // If filesort is nullptr: A new filesort will be created at the
      // end of optimization, using this order and flags. Otherwise: Only
      // used by EXPLAIN.
      ORDER *order;
      ha_rows limit;
      bool remove_duplicates;
      bool unwrap_rollup;
      bool force_sort_rowids;
    } sort;
    struct {
      AccessPath *child;
      bool rollup;
    } aggregate;
    struct {
      AccessPath *subquery_path;
      Temp_table_param *temp_table_param;
      TABLE *table;
      AccessPath *table_path;
      int ref_slice;
    } temptable_aggregate;
    struct {
      AccessPath *child;
      ha_rows limit;
      ha_rows offset;
      bool count_all_rows;
      bool reject_multiple_rows;
      // Only used when the LIMIT is on a UNION with SQL_CALC_FOUND_ROWS.
      // See Query_expression::send_records.
      ha_rows *send_records_override;
    } limit_offset;
    struct {
      AccessPath *child;
      JOIN *join;
      Temp_table_param *temp_table_param;
      TABLE *table;
      bool provide_rowid;
      int ref_slice;
    } stream;
    struct {
      // NOTE: The only legal access paths within table_path are
      // TABLE_SCAN, REF, REF_OR_NULL, EQ_REF, ALTERNATIVE and
      // CONST_TABLE (the latter is somewhat nonsensical).
      AccessPath *table_path;

      // Large, and has nontrivial destructors, so split out
      // into its own allocation.
      MaterializePathParameters *param;
      /** The total cost of executing the queries that we materialize.*/
      double subquery_cost;
    } materialize;
    struct {
      AccessPath *table_path;
      Table_ref *table_list;
      Item *condition;
    } materialize_information_schema_table;
    struct {
      Mem_root_array<AppendPathParameters> *children;
    } append;
    struct {
      AccessPath *child;
      Window *window;
      TABLE *temp_table;
      Temp_table_param *temp_table_param;
      int ref_slice;
      bool needs_buffering;
    } window;
    struct {
      AccessPath *child;
      SJ_TMP_TABLE *weedout_table;
      table_map tables_to_get_rowid_for;
    } weedout;
    struct {
      AccessPath *child;
      Item **group_items;
      int group_items_size;
    } remove_duplicates;
    struct {
      AccessPath *child;
      TABLE *table;
      KEY *key;
      unsigned loosescan_key_len;
    } remove_duplicates_on_index;
    struct {
      AccessPath *table_scan_path, *child;

      // For the ref.
      Index_lookup *used_ref;
    } alternative;
    struct {
      AccessPath *child;
      const char *name;
    } cache_invalidator;
    struct {
      AccessPath *child;
      table_map tables_to_delete_from;
      table_map immediate_tables;
    } delete_rows;
    struct {
      AccessPath *child;
      table_map tables_to_update;
      table_map immediate_tables;
    } update_rows;
  } u;
};
static_assert(std::is_trivially_destructible<AccessPath>::value,
              "AccessPath must be trivially destructible, as it is allocated "
              "on the MEM_ROOT and not wrapped in unique_ptr_destroy_only"
              "(because multiple candidates during planning could point to "
              "the same access paths, and refcounting would be expensive)");
static_assert(sizeof(AccessPath) <= 144,
              "We are creating a lot of access paths in the join "
              "optimizer, so be sure not to bloat it without noticing. "
              "(96 bytes for the base, 48 bytes for the variant.)");

inline void CopyBasicProperties(const AccessPath &from, AccessPath *to) {
  to->set_num_output_rows(from.num_output_rows());
  to->cost = from.cost;
  to->init_cost = from.init_cost;
  to->init_once_cost = from.init_once_cost;
  to->parameter_tables = from.parameter_tables;
  to->safe_for_rowid = from.safe_for_rowid;
  to->ordering_state = from.ordering_state;
}

// Trivial factory functions for all of the types of access paths above.

inline AccessPath *NewTableScanAccessPath(THD *thd, TABLE *table,
                                          bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::TABLE_SCAN;
  path->count_examined_rows = count_examined_rows;
  path->table_scan().table = table;
  return path;
}

inline AccessPath *NewIndexScanAccessPath(THD *thd, TABLE *table, int idx,
                                          bool use_order, bool reverse,
                                          bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::INDEX_SCAN;
  path->count_examined_rows = count_examined_rows;
  path->index_scan().table = table;
  path->index_scan().idx = idx;
  path->index_scan().use_order = use_order;
  path->index_scan().reverse = reverse;
  return path;
}

inline AccessPath *NewRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref,
                                    bool use_order, bool reverse,
                                    bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::REF;
  path->count_examined_rows = count_examined_rows;
  path->ref().table = table;
  path->ref().ref = ref;
  path->ref().use_order = use_order;
  path->ref().reverse = reverse;
  return path;
}

inline AccessPath *NewRefOrNullAccessPath(THD *thd, TABLE *table,
                                          Index_lookup *ref, bool use_order,
                                          bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::REF_OR_NULL;
  path->count_examined_rows = count_examined_rows;
  path->ref_or_null().table = table;
  path->ref_or_null().ref = ref;
  path->ref_or_null().use_order = use_order;
  return path;
}

inline AccessPath *NewEQRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref,
                                      bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::EQ_REF;
  path->count_examined_rows = count_examined_rows;
  path->eq_ref().table = table;
  path->eq_ref().ref = ref;
  return path;
}

inline AccessPath *NewPushedJoinRefAccessPath(THD *thd, TABLE *table,
                                              Index_lookup *ref, bool use_order,
                                              bool is_unique,
                                              bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::PUSHED_JOIN_REF;
  path->count_examined_rows = count_examined_rows;
  path->pushed_join_ref().table = table;
  path->pushed_join_ref().ref = ref;
  path->pushed_join_ref().use_order = use_order;
  path->pushed_join_ref().is_unique = is_unique;
  return path;
}

inline AccessPath *NewFullTextSearchAccessPath(THD *thd, TABLE *table,
                                               Index_lookup *ref,
                                               Item_func_match *ft_func,
                                               bool use_order, bool use_limit,
                                               bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::FULL_TEXT_SEARCH;
  path->count_examined_rows = count_examined_rows;
  path->full_text_search().table = table;
  path->full_text_search().ref = ref;
  path->full_text_search().use_order = use_order;
  path->full_text_search().use_limit = use_limit;
  path->full_text_search().ft_func = ft_func;
  return path;
}

inline AccessPath *NewConstTableAccessPath(THD *thd, TABLE *table,
                                           Index_lookup *ref,
                                           bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::CONST_TABLE;
  path->count_examined_rows = count_examined_rows;
  path->set_num_output_rows(1.0);
  path->cost = 0.0;
  path->init_cost = 0.0;
  path->init_once_cost = 0.0;
  path->const_table().table = table;
  path->const_table().ref = ref;
  return path;
}

inline AccessPath *NewMRRAccessPath(THD *thd, TABLE *table, Index_lookup *ref,
                                    int mrr_flags) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::MRR;
  path->mrr().table = table;
  path->mrr().ref = ref;
  path->mrr().mrr_flags = mrr_flags;

  // This will be filled in when the BKA iterator is created.
  path->mrr().bka_path = nullptr;

  return path;
}

inline AccessPath *NewFollowTailAccessPath(THD *thd, TABLE *table,
                                           bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::FOLLOW_TAIL;
  path->count_examined_rows = count_examined_rows;
  path->follow_tail().table = table;
  return path;
}

inline AccessPath *NewDynamicIndexRangeScanAccessPath(
    THD *thd, TABLE *table, QEP_TAB *qep_tab, bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::DYNAMIC_INDEX_RANGE_SCAN;
  path->count_examined_rows = count_examined_rows;
  path->dynamic_index_range_scan().table = table;
  path->dynamic_index_range_scan().qep_tab = qep_tab;
  return path;
}

inline AccessPath *NewMaterializedTableFunctionAccessPath(
    THD *thd, TABLE *table, Table_function *table_function,
    AccessPath *table_path) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::MATERIALIZED_TABLE_FUNCTION;
  path->materialized_table_function().table = table;
  path->materialized_table_function().table_function = table_function;
  path->materialized_table_function().table_path = table_path;
  return path;
}

inline AccessPath *NewUnqualifiedCountAccessPath(THD *thd) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::UNQUALIFIED_COUNT;
  return path;
}

inline AccessPath *NewTableValueConstructorAccessPath(THD *thd) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::TABLE_VALUE_CONSTRUCTOR;
  // The iterator keeps track of which row it is at in examined_rows,
  // so we always need to give it the pointer.
  path->count_examined_rows = true;
  return path;
}

inline AccessPath *NewNestedLoopSemiJoinWithDuplicateRemovalAccessPath(
    THD *thd, AccessPath *outer, AccessPath *inner, const TABLE *table,
    KEY *key, size_t key_len) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL;
  path->nested_loop_semijoin_with_duplicate_removal().outer = outer;
  path->nested_loop_semijoin_with_duplicate_removal().inner = inner;
  path->nested_loop_semijoin_with_duplicate_removal().table = table;
  path->nested_loop_semijoin_with_duplicate_removal().key = key;
  path->nested_loop_semijoin_with_duplicate_removal().key_len = key_len;
  return path;
}

inline AccessPath *NewFilterAccessPath(THD *thd, AccessPath *child,
                                       Item *condition) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::FILTER;
  path->filter().child = child;
  path->filter().condition = condition;
  path->filter().materialize_subqueries = false;
  return path;
}
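
// Illustrative usage sketch (added example, not part of the original header):
// composing a small plan from the factory functions above; "thd", "t1" and
// "where_cond" are assumed to exist in the calling optimizer code.
//
//   AccessPath *scan =
//       NewTableScanAccessPath(thd, t1, /*count_examined_rows=*/true);
//   AccessPath *root = NewFilterAccessPath(thd, scan, where_cond);
//
// Because AccessPath is trivially destructible and MEM_ROOT-allocated, a
// candidate like "root" can simply be abandoned (not freed) if a cheaper
// alternative is found during planning.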

// Not inline, because it needs access to filesort internals
// (which are forward-declared in this file).
AccessPath *NewSortAccessPath(THD *thd, AccessPath *child, Filesort *filesort,
                              ORDER *order, bool count_examined_rows);

inline AccessPath *NewAggregateAccessPath(THD *thd, AccessPath *child,
                                          bool rollup) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::AGGREGATE;
  path->aggregate().child = child;
  path->aggregate().rollup = rollup;
  return path;
}

inline AccessPath *NewTemptableAggregateAccessPath(
    THD *thd, AccessPath *subquery_path, Temp_table_param *temp_table_param,
    TABLE *table, AccessPath *table_path, int ref_slice) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::TEMPTABLE_AGGREGATE;
  path->temptable_aggregate().subquery_path = subquery_path;
  path->temptable_aggregate().temp_table_param = temp_table_param;
  path->temptable_aggregate().table = table;
  path->temptable_aggregate().table_path = table_path;
  path->temptable_aggregate().ref_slice = ref_slice;
  return path;
}

inline AccessPath *NewLimitOffsetAccessPath(THD *thd, AccessPath *child,
                                            ha_rows limit, ha_rows offset,
                                            bool count_all_rows,
                                            bool reject_multiple_rows,
                                            ha_rows *send_records_override) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::LIMIT_OFFSET;
  path->immediate_update_delete_table = child->immediate_update_delete_table;
  path->limit_offset().child = child;
  path->limit_offset().limit = limit;
  path->limit_offset().offset = offset;
  path->limit_offset().count_all_rows = count_all_rows;
  path->limit_offset().reject_multiple_rows = reject_multiple_rows;
  path->limit_offset().send_records_override = send_records_override;
  path->ordering_state = child->ordering_state;
  return path;
}

inline AccessPath *NewFakeSingleRowAccessPath(THD *thd,
                                              bool count_examined_rows) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::FAKE_SINGLE_ROW;
  path->count_examined_rows = count_examined_rows;
  path->set_num_output_rows(1.0);
  path->cost = 0.0;
  path->init_cost = 0.0;
  path->init_once_cost = 0.0;
  return path;
}

inline AccessPath *NewZeroRowsAccessPath(THD *thd, AccessPath *child,
                                         const char *cause) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::ZERO_ROWS;
  path->zero_rows().child = child;
  path->zero_rows().cause = cause;
  path->set_num_output_rows(0.0);
  path->cost = 0.0;
  path->init_cost = 0.0;
  path->init_once_cost = 0.0;
  path->num_output_rows_before_filter = 0.0;
  path->cost_before_filter = 0.0;
  return path;
}

inline AccessPath *NewZeroRowsAccessPath(THD *thd, const char *cause) {
  return NewZeroRowsAccessPath(thd, /*child=*/nullptr, cause);
}

inline AccessPath *NewZeroRowsAggregatedAccessPath(THD *thd,
                                                   const char *cause) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::ZERO_ROWS_AGGREGATED;
  path->zero_rows_aggregated().cause = cause;
  path->set_num_output_rows(1.0);
  path->cost = 0.0;
  path->init_cost = 0.0;
  return path;
}

inline AccessPath *NewStreamingAccessPath(THD *thd, AccessPath *child,
                                          JOIN *join,
                                          Temp_table_param *temp_table_param,
                                          TABLE *table, int ref_slice) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::STREAM;
  path->stream().child = child;
  path->stream().join = join;
  path->stream().temp_table_param = temp_table_param;
  path->stream().table = table;
  path->stream().ref_slice = ref_slice;
  // Will be set later if we get a weedout access path as parent.
  path->stream().provide_rowid = false;
  return path;
}

inline Mem_root_array<MaterializePathParameters::QueryBlock>
SingleMaterializeQueryBlock(THD *thd, AccessPath *path, int select_number,
                            JOIN *join, bool copy_items,
                            Temp_table_param *temp_table_param) {
  assert(path != nullptr);
  Mem_root_array<MaterializePathParameters::QueryBlock> array(thd->mem_root, 1);
  MaterializePathParameters::QueryBlock &query_block = array[0];
  query_block.subquery_path = path;
  query_block.select_number = select_number;
  query_block.join = join;
  query_block.disable_deduplication_by_hash_field = false;
  query_block.copy_items = copy_items;
  query_block.temp_table_param = temp_table_param;
  return array;
}

inline AccessPath *NewMaterializeAccessPath(
    THD *thd,
    Mem_root_array<MaterializePathParameters::QueryBlock> query_blocks,
    Mem_root_array<const AccessPath *> *invalidators, TABLE *table,
    AccessPath *table_path, Common_table_expr *cte, Query_expression *unit,
    int ref_slice, bool rematerialize, ha_rows limit_rows,
    bool reject_multiple_rows) {
  MaterializePathParameters *param =
      new (thd->mem_root) MaterializePathParameters;
  param->query_blocks = std::move(query_blocks);
  if (rematerialize) {
    // There's no point in adding invalidators if we're rematerializing
    // every time anyway.
    param->invalidators = nullptr;
  } else {
    param->invalidators = invalidators;
  }
  param->table = table;
  param->cte = cte;
  param->unit = unit;
  param->ref_slice = ref_slice;
  param->rematerialize = rematerialize;
  param->limit_rows = (table == nullptr || table->is_union_or_table()
                           ? limit_rows
                           :
                           // INTERSECT, EXCEPT: Enforced by TableScanIterator,
                           // see its constructor
                           HA_POS_ERROR);
  param->reject_multiple_rows = reject_multiple_rows;

#ifndef NDEBUG
  for (MaterializePathParameters::QueryBlock &query_block :
       param->query_blocks) {
    assert(query_block.subquery_path != nullptr);
  }
#endif

  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::MATERIALIZE;
  path->materialize().table_path = table_path;
  path->materialize().param = param;
  path->materialize().subquery_cost = -1.0;
  if (rematerialize) {
    path->safe_for_rowid = AccessPath::SAFE_IF_SCANNED_ONCE;
  } else {
    // The default; this is just to be explicit in the code.
    path->safe_for_rowid = AccessPath::SAFE;
  }
  return path;
}
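
// Illustrative usage sketch (added example, not part of the original header):
// materializing the result of a single query block into a temporary table and
// scanning that table; "thd", "join", "subquery_path", "tmp_table", "param"
// and "unit" are assumed to exist in the calling optimizer code.
//
//   Mem_root_array<MaterializePathParameters::QueryBlock> blocks =
//       SingleMaterializeQueryBlock(thd, subquery_path, /*select_number=*/1,
//                                   join, /*copy_items=*/true, param);
//   AccessPath *table_path = NewTableScanAccessPath(
//       thd, tmp_table, /*count_examined_rows=*/false);
//   AccessPath *mat = NewMaterializeAccessPath(
//       thd, std::move(blocks), /*invalidators=*/nullptr, tmp_table,
//       table_path, /*cte=*/nullptr, unit, /*ref_slice=*/-1,
//       /*rematerialize=*/false, /*limit_rows=*/HA_POS_ERROR,
//       /*reject_multiple_rows=*/false);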

inline AccessPath *NewMaterializeInformationSchemaTableAccessPath(
    THD *thd, AccessPath *table_path, Table_ref *table_list, Item *condition) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::MATERIALIZE_INFORMATION_SCHEMA_TABLE;
  path->materialize_information_schema_table().table_path = table_path;
  path->materialize_information_schema_table().table_list = table_list;
  path->materialize_information_schema_table().condition = condition;
  return path;
}

// The Mem_root_array must be allocated on a MEM_ROOT that lives at least for
// as long as the access path.
inline AccessPath *NewAppendAccessPath(
    THD *thd, Mem_root_array<AppendPathParameters> *children) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::APPEND;
  path->append().children = children;
  path->cost = 0.0;
  path->init_cost = 0.0;
  path->init_once_cost = 0.0;
  double num_output_rows = 0.0;
  for (const AppendPathParameters &child : *children) {
    path->cost += child.path->cost;
    path->init_cost += child.path->init_cost;
    path->init_once_cost += child.path->init_once_cost;
    num_output_rows += child.path->num_output_rows();
  }
  path->set_num_output_rows(num_output_rows);
  return path;
}

inline AccessPath *NewWindowAccessPath(THD *thd, AccessPath *child,
                                       Window *window,
                                       Temp_table_param *temp_table_param,
                                       int ref_slice, bool needs_buffering) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::WINDOW;
  path->window().child = child;
  path->window().window = window;
  path->window().temp_table = nullptr;
  path->window().temp_table_param = temp_table_param;
  path->window().ref_slice = ref_slice;
  path->window().needs_buffering = needs_buffering;
  return path;
}

inline AccessPath *NewWeedoutAccessPath(THD *thd, AccessPath *child,
                                        SJ_TMP_TABLE *weedout_table) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::WEEDOUT;
  path->weedout().child = child;
  path->weedout().weedout_table = weedout_table;
  path->weedout().tables_to_get_rowid_for =
      0;  // Must be handled by the caller.
  return path;
}

inline AccessPath *NewRemoveDuplicatesAccessPath(THD *thd, AccessPath *child,
                                                 Item **group_items,
                                                 int group_items_size) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::REMOVE_DUPLICATES;
  path->remove_duplicates().child = child;
  path->remove_duplicates().group_items = group_items;
  path->remove_duplicates().group_items_size = group_items_size;
  return path;
}

inline AccessPath *NewRemoveDuplicatesOnIndexAccessPath(
    THD *thd, AccessPath *child, TABLE *table, KEY *key,
    unsigned loosescan_key_len) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::REMOVE_DUPLICATES_ON_INDEX;
  path->remove_duplicates_on_index().child = child;
  path->remove_duplicates_on_index().table = table;
  path->remove_duplicates_on_index().key = key;
  path->remove_duplicates_on_index().loosescan_key_len = loosescan_key_len;
  return path;
}

inline AccessPath *NewAlternativeAccessPath(THD *thd, AccessPath *child,
                                            AccessPath *table_scan_path,
                                            Index_lookup *used_ref) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::ALTERNATIVE;
  path->alternative().table_scan_path = table_scan_path;
  path->alternative().child = child;
  path->alternative().used_ref = used_ref;
  return path;
}

inline AccessPath *NewInvalidatorAccessPath(THD *thd, AccessPath *child,
                                            const char *name) {
  AccessPath *path = new (thd->mem_root) AccessPath;
  path->type = AccessPath::CACHE_INVALIDATOR;
  path->cache_invalidator().child = child;
  path->cache_invalidator().name = name;
  return path;
}

AccessPath *NewDeleteRowsAccessPath(THD *thd, AccessPath *child,
                                    table_map delete_tables,
                                    table_map immediate_tables);

AccessPath *NewUpdateRowsAccessPath(THD *thd, AccessPath *child,
                                    table_map delete_tables,
                                    table_map immediate_tables);

/**
  Modifies "path" and the paths below it so that they provide row IDs for
  all tables.
 */
void FindTablesToGetRowidFor(AccessPath *path);

/**
  If the path is a FILTER path marked that subqueries are to be materialized,
  do so. If not, do nothing.

  It is important that this is not called until the entire plan is ready;
  not just when planning a single query block. The reason is that a query
  block A with materializable subqueries may itself be part of a materializable
  subquery B, so if one calls this when planning A, the subqueries in A will
  irrevocably be materialized, even if that is not the optimal plan given B.
  Thus, this is done when creating iterators.
 */
bool FinalizeMaterializedSubqueries(THD *thd, JOIN *join, AccessPath *path);

unique_ptr_destroy_only<RowIterator> CreateIteratorFromAccessPath(
    THD *thd, MEM_ROOT *mem_root, AccessPath *path, JOIN *join,
    bool eligible_for_batch_mode);

// A short form of CreateIteratorFromAccessPath() that implicitly uses the
// THD's MEM_ROOT for storage, which is nearly always what you want. (The only
// caller that does anything else is DynamicRangeIterator.)
inline unique_ptr_destroy_only<RowIterator> CreateIteratorFromAccessPath(
    THD *thd, AccessPath *path, JOIN *join, bool eligible_for_batch_mode) {
  return CreateIteratorFromAccessPath(thd, thd->mem_root, path, join,
                                      eligible_for_batch_mode);
}
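
// Illustrative usage sketch (added example, not part of the original header):
// once planning has settled on a final AccessPath tree ("root", with "thd"
// and "join" assumed from the surrounding context), execution turns it into
// an iterator tree and drives it through the RowIterator interface.
//
//   unique_ptr_destroy_only<RowIterator> iterator =
//       CreateIteratorFromAccessPath(thd, root, join,
//                                    /*eligible_for_batch_mode=*/true);
//   if (iterator == nullptr || iterator->Init()) return true;  // error
//   while (iterator->Read() == 0) {
//     // Process one row. Read() returns 0 on success, -1 on EOF,
//     // and 1 on error.
//   }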

void SetCostOnTableAccessPath(const Cost_model_server &cost_model,
                              const POSITION *pos, bool is_after_filter,
                              AccessPath *path);

/**
  Return the TABLE* referred from 'path' if it is a basic access path,
  else a nullptr is returned. Temporary tables, such as those used by
  sorting, aggregate and subquery materialization, are not returned.
*/
TABLE *GetBasicTable(const AccessPath *path);

/**
  Returns a map of all tables read when `path` or any of its children are
  executed. Only iterators that are part of the same query block as `path`
  are considered.

  If a table is read that doesn't have a map, specifically the temporary
  tables made as part of materialization within the same query block,
  RAND_TABLE_BIT will be set as a convention and none of that access path's
  children will be included in the map. In this case, the caller will need to
  manually go in and find said access path, to ask it for its TABLE object.

  If include_pruned_tables = true, tables that are hidden under a ZERO_ROWS
  access path (ie., pruned away due to impossible join conditions) will be
  included in the map. This is normally what you want, as those tables need to
  be included whenever you store NULL flags and the likes, but if you don't
  want them (perhaps to specifically check for conditions referring to pruned
  tables), you can set it to false.
 */
table_map GetUsedTableMap(const AccessPath *path, bool include_pruned_tables);

/**
  Find the list of all tables used by this root, stopping at materializations.
  Used for knowing which tables to sort.
 */
Mem_root_array<TABLE *> CollectTables(THD *thd, AccessPath *root_path);

/**
  For each access path in the (sub)tree rooted at “path”, expand any use of
  “filter_predicates” into newly-inserted FILTER access paths, using the given
  predicate list. This is used after finding an optimal set of access paths,
  to normalize the tree so that the remaining consumers do not need to worry
  about filter_predicates and cost_before_filter.

  “join” is the join that “path” is part of.
 */
void ExpandFilterAccessPaths(THD *thd, AccessPath *path, const JOIN *join,
                             const Mem_root_array<Predicate> &predicates,
                             unsigned num_where_predicates);

/**
  Extracts the Item expression from the given “filter_predicates” corresponding
  to the given “mask”.
 */
Item *ConditionFromFilterPredicates(
    const Mem_root_array<Predicate> &predicates, OverflowBitset mask,
    int num_where_predicates);

/// Like ExpandFilterAccessPaths(), but expands only the single access path
/// at “path”.
void ExpandSingleFilterAccessPath(THD *thd, AccessPath *path, const JOIN *join,
                                  const Mem_root_array<Predicate> &predicates,
                                  unsigned num_where_predicates);

/// Returns the tables that are part of a hash join.
table_map GetHashJoinTables(AccessPath *path);

#endif  // SQL_JOIN_OPTIMIZER_ACCESS_PATH_H
AccessPath * NewStreamingAccessPath(THD *thd, AccessPath *child, JOIN *join, Temp_table_param *temp_table_param, TABLE *table, int ref_slice)
Definition: access_path.h:1511
AccessPath * NewRemoveDuplicatesAccessPath(THD *thd, AccessPath *child, Item **group_items, int group_items_size)
Definition: access_path.h:1651
AccessPath * NewInvalidatorAccessPath(THD *thd, AccessPath *child, const char *name)
Definition: access_path.h:1685
AccessPath * NewRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool reverse, bool count_examined_rows)
Definition: access_path.h:1260
AccessPath * NewDeleteRowsAccessPath(THD *thd, AccessPath *child, table_map delete_tables, table_map immediate_tables)
Definition: access_path.cc:100
bool FinalizeMaterializedSubqueries(THD *thd, JOIN *join, AccessPath *path)
If the path is a FILTER path marked that subqueries are to be materialized, do so.
Definition: access_path.cc:307
void CopyBasicProperties(const AccessPath &from, AccessPath *to)
Definition: access_path.h:1226
void ExpandSingleFilterAccessPath(THD *thd, AccessPath *path, const JOIN *join, const Mem_root_array< Predicate > &predicates, unsigned num_where_predicates)
Like ExpandFilterAccessPaths(), but expands only the single access path at “path”.
Definition: access_path.cc:1349
AccessPath * NewFullTextSearchAccessPath(THD *thd, TABLE *table, Index_lookup *ref, Item_func_match *ft_func, bool use_order, bool use_limit, bool count_examined_rows)
Definition: access_path.h:1309
AccessPath * NewTemptableAggregateAccessPath(THD *thd, AccessPath *subquery_path, Temp_table_param *temp_table_param, TABLE *table, AccessPath *table_path, int ref_slice)
Definition: access_path.h:1436
table_map GetHashJoinTables(AccessPath *path)
Returns the tables that are part of a hash join.
Definition: access_path.cc:1471
AccessPath * NewDynamicIndexRangeScanAccessPath(THD *thd, TABLE *table, QEP_TAB *qep_tab, bool count_examined_rows)
Definition: access_path.h:1363
AccessPath * NewZeroRowsAggregatedAccessPath(THD *thd, const char *cause)
Definition: access_path.h:1500
AccessPath * NewAlternativeAccessPath(THD *thd, AccessPath *child, AccessPath *table_scan_path, Index_lookup *used_ref)
Definition: access_path.h:1674
AccessPath * NewUpdateRowsAccessPath(THD *thd, AccessPath *child, table_map delete_tables, table_map immediate_tables)
Definition: access_path.cc:112
AccessPath * NewMRRAccessPath(THD *thd, TABLE *table, Index_lookup *ref, int mrr_flags)
Definition: access_path.h:1340
table_map GetUsedTableMap(const AccessPath *path, bool include_pruned_tables)
Returns a map of all tables read when path or any of its children are executed.
Definition: access_path.cc:214
AccessPath * NewAppendAccessPath(THD *thd, Mem_root_array< AppendPathParameters > *children)
Definition: access_path.h:1606
AccessPath * NewNestedLoopSemiJoinWithDuplicateRemovalAccessPath(THD *thd, AccessPath *outer, AccessPath *inner, const TABLE *table, KEY *key, size_t key_len)
Definition: access_path.h:1399
AccessPath * NewRemoveDuplicatesOnIndexAccessPath(THD *thd, AccessPath *child, TABLE *table, KEY *key, unsigned loosescan_key_len)
Definition: access_path.h:1662
AccessPath * NewFakeSingleRowAccessPath(THD *thd, bool count_examined_rows)
Definition: access_path.h:1469
Item * ConditionFromFilterPredicates(const Mem_root_array< Predicate > &predicates, OverflowBitset mask, int num_where_predicates)
Extracts the Item expression from the given “filter_predicates” corresponding to the given “mask”.
Definition: access_path.cc:1338
AccessPath * NewFilterAccessPath(THD *thd, AccessPath *child, Item *condition)
Definition: access_path.h:1412
constexpr double kUnknownRowCount
Indicates that a row estimate has not yet been made.
Definition: access_path.h:168
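A hedged sketch of the usual guard before trusting an estimate (the helper name is ours; the -1.0 sentinel is documented under m_num_output_rows below):
  // Sketch: fall back to a caller-provided default while the estimate
  // is still kUnknownRowCount (-1.0).
  static double EstimatedRowsOr(const AccessPath &path, double fallback) {
    const double rows = path.num_output_rows();
    return rows == kUnknownRowCount ? fallback : rows;
  }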
AccessPath * NewSortAccessPath(THD *thd, AccessPath *child, Filesort *filesort, ORDER *order, bool count_examined_rows)
Definition: access_path.cc:65
AccessPath * NewMaterializedTableFunctionAccessPath(THD *thd, TABLE *table, Table_function *table_function, AccessPath *table_path)
Definition: access_path.h:1373
AccessPath * NewMaterializeAccessPath(THD *thd, Mem_root_array< MaterializePathParameters::QueryBlock > query_blocks, Mem_root_array< const AccessPath * > *invalidators, TABLE *table, AccessPath *table_path, Common_table_expr *cte, Query_expression *unit, int ref_slice, bool rematerialize, ha_rows limit_rows, bool reject_multiple_rows)
Definition: access_path.h:1543
Mem_root_array< TABLE * > CollectTables(THD *thd, AccessPath *root_path)
Find the list of all tables used by this root, stopping at materializations.
Definition: access_path.cc:245
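Since Mem_root_array supports range-based iteration, the result can be consumed directly; a sketch, assuming a valid THD and root path:
  // Sketch: visit every base table referenced under "root_path",
  // stopping at materializations as documented above.
  for (TABLE *tbl : CollectTables(thd, root_path)) {
    // e.g., inspect tbl->alias or set per-table flags here.
  }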
AccessPath * NewTableScanAccessPath(THD *thd, TABLE *table, bool count_examined_rows)
Definition: access_path.h:1238
void ExpandFilterAccessPaths(THD *thd, AccessPath *path, const JOIN *join, const Mem_root_array< Predicate > &predicates, unsigned num_where_predicates)
For each access path in the (sub)tree rooted at “path”, expand any use of “filter_predicates” into new FILTER access paths.
Definition: access_path.cc:1459
AccessPath * NewMaterializeInformationSchemaTableAccessPath(THD *thd, AccessPath *table_path, Table_ref *table_list, Item *condition)
Definition: access_path.h:1594
void FindTablesToGetRowidFor(AccessPath *path)
Modifies "path" and the paths below it so that they provide row IDs for all tables.
Definition: access_path.cc:1194
Mem_root_array< MaterializePathParameters::QueryBlock > SingleMaterializeQueryBlock(THD *thd, AccessPath *path, int select_number, JOIN *join, bool copy_items, Temp_table_param *temp_table_param)
Definition: access_path.h:1528
AccessPath * NewAggregateAccessPath(THD *thd, AccessPath *child, bool rollup)
Definition: access_path.h:1427
TABLE * GetBasicTable(const AccessPath *path)
Return the TABLE * referred to by 'path' if it is a basic access path, otherwise nullptr.
Definition: access_path.cc:161
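Because composite paths (joins, sorts, and so on) yield nullptr, callers should test the result; a sketch:
  // Sketch: only basic, single-table paths yield a TABLE here.
  if (TABLE *tbl = GetBasicTable(path)) {
    // "path" is a table/index/range scan or a single-index lookup on tbl.
  }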
AccessPath * NewRefOrNullAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool count_examined_rows)
Definition: access_path.h:1273
AccessPath * NewLimitOffsetAccessPath(THD *thd, AccessPath *child, ha_rows limit, ha_rows offset, bool count_all_rows, bool reject_multiple_rows, ha_rows *send_records_override)
Definition: access_path.h:1449
AccessPath * NewEQRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool count_examined_rows)
Definition: access_path.h:1285
AccessPath * NewWindowAccessPath(THD *thd, AccessPath *child, Window *window, Temp_table_param *temp_table_param, int ref_slice, bool needs_buffering)
Definition: access_path.h:1625
AccessPath * NewFollowTailAccessPath(THD *thd, TABLE *table, bool count_examined_rows)
Definition: access_path.h:1354
AccessPath * NewTableValueConstructorAccessPath(THD *thd)
Definition: access_path.h:1390
AccessPath * NewConstTableAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool count_examined_rows)
Definition: access_path.h:1325
unique_ptr_destroy_only< RowIterator > CreateIteratorFromAccessPath(THD *thd, MEM_ROOT *mem_root, AccessPath *path, JOIN *join, bool eligible_for_batch_mode)
Definition: access_path.cc:377
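Together with the constructor functions above, this is the route from a plan fragment to executable form. A minimal sketch (not the actual server code), assuming a valid THD and JOIN, an opened TABLE, and a fixed Item condition; error handling is elided:
  static bool ScanWithFilter(THD *thd, JOIN *join, TABLE *table,
                             Item *where_cond) {
    AccessPath *scan =
        NewTableScanAccessPath(thd, table, /*count_examined_rows=*/true);
    AccessPath *root = NewFilterAccessPath(thd, scan, where_cond);
    unique_ptr_destroy_only<RowIterator> it = CreateIteratorFromAccessPath(
        thd, thd->mem_root, root, join, /*eligible_for_batch_mode=*/true);
    if (it == nullptr || it->Init()) return true;  // Error.
    while (it->Read() == 0) {
      // Each successful Read() leaves the current row in table->record[0].
    }
    return false;
  }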
AccessPath * NewWeedoutAccessPath(THD *thd, AccessPath *child, SJ_TMP_TABLE *weedout_table)
Definition: access_path.h:1640
AccessPath * NewPushedJoinRefAccessPath(THD *thd, TABLE *table, Index_lookup *ref, bool use_order, bool is_unique, bool count_examined_rows)
Definition: access_path.h:1295
AccessPath * NewZeroRowsAccessPath(THD *thd, AccessPath *child, const char *cause)
Definition: access_path.h:1481
AccessPath * NewUnqualifiedCountAccessPath(THD *thd)
Definition: access_path.h:1384
AccessPath * NewIndexScanAccessPath(THD *thd, TABLE *table, int idx, bool use_order, bool reverse, bool count_examined_rows)
Definition: access_path.h:1247
A wrapper class which provides array bounds checking.
Definition: sql_array.h:46
After parsing, a Common Table Expression is accessed through a Table_ref.
Definition: table.h:4295
API for getting cost estimates for server operations that are not directly related to a table object.
Definition: opt_costmodel.h:51
Sorting related info.
Definition: filesort.h:51
Definition: item_func.h:3387
Base class that is used to represent any kind of expression in a relational query.
Definition: item.h:850
Definition: sql_optimizer.h:132
Definition: key.h:112
A typesafe replacement for DYNAMIC_ARRAY.
Definition: mem_root_array.h:425
Definition: overflow_bitset.h:76
Definition: sql_executor.h:259
Definition: range_optimizer.h:68
This class represents a query expression (one query block or several query blocks combined with UNION).
Definition: sql_lex.h:622
A context for reading through a single table using a chosen access method: index read, scan, etc., use of cache, etc.
Definition: row_iterator.h:81
Definition: sql_executor.h:103
For each client connection we create a separate thread with THD serving as a thread/connection descriptor.
Definition: sql_lexer_thd.h:33
MEM_ROOT * mem_root
Definition: sql_lexer_thd.h:37
Class representing a table function.
Definition: table_function.h:52
Definition: table.h:2761
Object containing parameters used when creating and using temporary tables.
Definition: temp_table_param.h:94
Represents the (explicit) window of a SQL 2003 section 7.11 <window clause>, or the implicit (inlined) window of a window function call.
Definition: window.h:104
static MEM_ROOT mem_root
Definition: client_plugin.cc:109
void EstimateLimitOffsetCost(AccessPath *path)
Estimate the costs and row count for a LIMIT_OFFSET AccessPath.
Definition: cost_model.cc:886
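A sketch of how this fits with NewLimitOffsetAccessPath() above, assuming an existing child path with populated estimates (whether the constructor already runs the estimate internally is version-dependent, so calling it explicitly is harmless):
  // Sketch: wrap "child" in LIMIT 10 OFFSET 5, then (re)compute the
  // wrapper's cost and row count from the child's estimates.
  AccessPath *limit_path = NewLimitOffsetAccessPath(
      thd, child, /*limit=*/10, /*offset=*/5, /*count_all_rows=*/false,
      /*reject_multiple_rows=*/false, /*send_records_override=*/nullptr);
  EstimateLimitOffsetCost(limit_path);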
bool filesort(THD *thd, Filesort *filesort, RowIterator *source_iterator, table_map tables_to_get_rowid_for, ha_rows num_rows_estimate, Filesort_info *fs_info, Sort_result *sort_result, ha_rows *found_rows)
Sort a table.
Definition: filesort.cc:366
void SetCostOnTableAccessPath(const Cost_model_server &cost_model, const POSITION *pos, bool is_after_filter, AccessPath *path)
Definition: sql_executor.cc:1852
std::bitset< kMaxSupportedFDs > FunctionalDependencySet
Definition: interesting_orders_defs.h:62
JoinType
Definition: join_type.h:27
static mi_bit_type mask[]
Definition: mi_packrec.cc:140
std::unique_ptr< T, Destroy_only< T > > unique_ptr_destroy_only
std::unique_ptr, but only destroying.
Definition: my_alloc.h:488
my_off_t ha_rows
Definition: my_base.h:1139
#define HA_POS_ERROR
Definition: my_base.h:1141
uint64_t table_map
Definition: my_table_map.h:29
static char * path
Definition: mysqldump.cc:133
PT & ref(PT *tp)
Definition: tablespace_impl.cc:358
uint64_t NodeMap
Since our graphs can never have more than 61 tables, node sets and edge lists are implemented using 64-bit bit sets.
Definition: node_map.h:39
RangeReverse< Range > reverse(Range &x)
Iterate over a range in reverse.
Definition: utilities.h:131
std::string join(Container cont, const std::string &delim)
join elements of a container into a string separated by a delimiter.
Definition: string.h:150
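For example (a trivial sketch):
  // Sketch: produces "a, b, c".
  std::vector<std::string> parts = {"a", "b", "c"};
  std::string out = join(parts, ", ");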
OverflowBitset is a fixed-size (once allocated) bitmap that is optimized for the common case of few elements.
required string key
Definition: replication_asynchronous_connection_failover.proto:59
case opt name
Definition: sslopt-case.h:32
Access paths are a query planning structure that corresponds 1:1 to iterators, in that an access path contains pretty much exactly the information needed to instantiate the given iterator, plus some information that is only needed during planning, such as costs.
Definition: access_path.h:192
auto & weedout()
Definition: access_path.h:789
auto & filter()
Definition: access_path.h:709
bool count_all_rows
Definition: access_path.h:1135
AccessPath * bka_path
Definition: access_path.h:905
auto & ref_or_null()
Definition: access_path.h:517
auto & materialized_table_function()
Definition: access_path.h:629
AccessPath * cpk_child
Definition: access_path.h:966
auto & rowid_union()
Definition: access_path.h:597
double cost_before_filter
Definition: access_path.h:395
hypergraph::NodeMap parameter_tables
If nonzero, a bitmap of other tables whose joined-in rows must already be loaded when rows from this access path are evaluated.
Definition: access_path.h:481
const auto & bka_join() const
Definition: access_path.h:689
TABLE * temp_table
Definition: access_path.h:1172
OverflowBitset equijoin_predicates
Definition: access_path.h:1076
Item ** group_items
Definition: access_path.h:1184
double rescan_cost() const
Return the cost of scanning the given path for the second time (or later) in the given query block.
Definition: access_path.h:390
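The name pairs with init_once_cost below; our reading, stated as an assumption rather than as the source's definition:
  // Presumed identity: rescan_cost() == cost - init_once_cost,
  // since only the once-per-query-block setup is amortized across rescans.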
bool rollup
Definition: access_path.h:1122
auto & materialize()
Definition: access_path.h:757
const auto & temptable_aggregate() const
Definition: access_path.h:737
OverflowBitset & subsumed_sargable_join_predicates()
Similar to applied_sargable_join_predicates, bitmap of sargable join predicates that have been applied and will subsume the join predicate entirely.
Definition: access_path.h:453
auto & index_scan()
Definition: access_path.h:501
const auto & delete_rows() const
Definition: access_path.h:833
struct AccessPath::@55::@81 nested_loop_join
const auto & filter() const
Definition: access_path.h:713
auto & delete_rows()
Definition: access_path.h:829
bool reuse_handler
Definition: access_path.h:943
KEY_PART * used_key_part
Definition: access_path.h:917
struct AccessPath::@55::@90 materialize_information_schema_table
struct AccessPath::@55::@71 group_index_skip_scan
OverflowBitset & applied_sargable_join_predicates()
Bitmap of sargable join predicates that have already been applied in this access path by means of an index lookup (ref access).
Definition: access_path.h:433
Item * condition
Definition: access_path.h:1094
struct AccessPath::@55::@74 unqualified_count
const auto & index_merge() const
Definition: access_path.h:585
const auto & update_rows() const
Definition: access_path.h:841
enum AccessPath::Type type
auto & alternative()
Definition: access_path.h:813
auto & pushed_join_ref()
Definition: access_path.h:533
AccessPath * outer
Definition: access_path.h:1050
auto & index_skip_scan()
Definition: access_path.h:605
struct AccessPath::@55::@95 remove_duplicates_on_index
bool rewrite_semi_to_inner
Definition: access_path.h:1054
const auto & zero_rows_aggregated() const
Definition: access_path.h:673
auto & group_index_skip_scan()
Definition: access_path.h:613
struct AccessPath::@55::@76 fake_single_row
const auto & eq_ref() const
Definition: access_path.h:529
int group_items_size
Definition: access_path.h:1185
bool use_order
Definition: access_path.h:867
struct AccessPath::@55::@88 stream
bool pfs_batch_mode
Definition: access_path.h:1068
auto & temptable_aggregate()
Definition: access_path.h:733
AccessPath()
Definition: access_path.h:198
GroupIndexSkipScanParameters * param
Definition: access_path.h:1010
auto & rowid_intersection()
Definition: access_path.h:589
bool materialize_subqueries
Definition: access_path.h:1104
AccessPath * child
Definition: access_path.h:1038
bool allow_spill_to_disk
Definition: access_path.h:1052
Index_lookup * used_ref
Definition: access_path.h:1198
struct AccessPath::@55::@78 zero_rows_aggregated
auto & mrr()
Definition: access_path.h:557
struct AccessPath::@55::@70 index_skip_scan
const auto & index_scan() const
Definition: access_path.h:505
const auto & fake_single_row() const
Definition: access_path.h:657
bool reject_multiple_rows
Definition: access_path.h:1136
bool allow_clustered_primary_key_scan
Definition: access_path.h:958
const char * name
Definition: access_path.h:1202
bool use_limit
Definition: access_path.h:895
Window * window
Definition: access_path.h:1171
table_map tables_to_get_rowid_for
Definition: access_path.h:1055
auto & materialize_information_schema_table()
Definition: access_path.h:765
auto & bka_join()
Definition: access_path.h:685
const auto & follow_tail() const
Definition: access_path.h:569
struct AccessPath::@55::@83 filter
SJ_TMP_TABLE * weedout_table
Definition: access_path.h:1179
Index_lookup * ref
Definition: access_path.h:872
const auto & window() const
Definition: access_path.h:785
const auto & materialized_table_function() const
Definition: access_path.h:633
auto & unqualified_count()
Definition: access_path.h:637
bool keep_current_rowid
Definition: access_path.h:907
bool using_extended_key_parts
Definition: access_path.h:953
auto & nested_loop_join()
Definition: access_path.h:693
const auto & full_text_search() const
Definition: access_path.h:545
bool can_be_used_for_ror
Definition: access_path.h:933
float rec_per_key
Definition: access_path.h:1061
Safety safe_for_rowid
Whether it is safe to get row IDs (for sorting) from this access path.
Definition: access_path.h:291
JOIN * join
Definition: access_path.h:1143
struct AccessPath::@55::@69 rowid_union
unsigned index
Definition: access_path.h:929
unsigned mrr_buf_size
Definition: access_path.h:925
struct AccessPath::@55::@60 eq_ref
auto & remove_duplicates()
Definition: access_path.h:797
struct AccessPath::@55::@68 rowid_intersection
struct AccessPath::@55::@61 pushed_join_ref
struct AccessPath::@55::@63 const_table
RowIterator * iterator
If an iterator has been instantiated for this access path, points to the iterator.
Definition: access_path.h:358
const auto & stream() const
Definition: access_path.h:753
double init_cost
Expected cost to initialize this access path; i.e., the cost to read k out of N rows would be init_cost + (k/N) * (cost - init_cost).
Definition: access_path.h:370
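A worked instance of that formula (a sketch; the helper name is ours, the fields are the ones listed on this page):
  // With cost = 110, init_cost = 10 and N = 1000 rows, reading the first
  // k = 100 rows is expected to cost 10 + (100/1000) * (110 - 10) = 20.
  static double CostToReadFirstK(const AccessPath &p, double k, double N) {
    return p.init_cost + (k / N) * (p.cost - p.init_cost);
  }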
const auto & remove_duplicates() const
Definition: access_path.h:801
auto & window()
Definition: access_path.h:781
bool need_rows_in_rowid_order
Definition: access_path.h:937
Table_ref * table_list
Definition: access_path.h:1163
const auto & unqualified_count() const
Definition: access_path.h:641
void * secondary_engine_data
Auxiliary data used by a secondary storage engine while processing the access path during optimization and execution.
Definition: access_path.h:490
struct AccessPath::@55::@86 temptable_aggregate
struct AccessPath::@55::@85 aggregate
void set_num_output_rows(double val)
Definition: access_path.h:848
bool is_covering
Definition: access_path.h:986
const auto & mrr() const
Definition: access_path.h:561
bool remove_duplicates
Definition: access_path.h:1116
table_map tables_to_update
Definition: access_path.h:1211
const auto & table_value_constructor() const
Definition: access_path.h:649
KEY * key
Definition: access_path.h:1088
auto & table_value_constructor()
Definition: access_path.h:645
auto & stream()
Definition: access_path.h:749
size_t key_len
Definition: access_path.h:1089
const OverflowBitset & subsumed_sargable_join_predicates() const
Definition: access_path.h:456
const auto & zero_rows() const
Definition: access_path.h:665
const auto & append() const
Definition: access_path.h:777
ha_rows offset
Definition: access_path.h:1134
int idx
Definition: access_path.h:866
struct AccessPath::@55::@87 limit_offset
auto & hash_join()
Definition: access_path.h:677
auto & cache_invalidator()
Definition: access_path.h:821
struct AccessPath::@55::@73 materialized_table_function
bool force_sort_rowids
Definition: access_path.h:1118
unsigned num_used_key_parts
Definition: access_path.h:930
unsigned num_ranges
Definition: access_path.h:922
ORDER * order
Definition: access_path.h:1114
auto & sort()
Definition: access_path.h:717
auto & fake_single_row()
Definition: access_path.h:653
ha_rows limit
Definition: access_path.h:1115
auto & follow_tail()
Definition: access_path.h:565
double m_num_output_rows
Expected number of output rows, -1.0 for unknown.
Definition: access_path.h:852
const JoinPredicate * join_predicate
Definition: access_path.h:1051
const auto & const_table() const
Definition: access_path.h:553
union AccessPath::@55 u
const auto & dynamic_index_range_scan() const
Definition: access_path.h:625
const auto & table_scan() const
Definition: access_path.h:497
unsigned mrr_flags
Definition: access_path.h:924
int ref_slice
Definition: access_path.h:1129
OverflowBitset filter_predicates
Bitmap of WHERE predicates that we are including on this access path, referring to the “predicates” array internal to the join optimizer.
Definition: access_path.h:417
bool can_be_used_for_imerge
Definition: access_path.h:940
bool forced_by_hint
Definition: access_path.h:957
auto & limit_offset()
Definition: access_path.h:741
AccessPath * inner
Definition: access_path.h:1050
bool provide_rowid
Definition: access_path.h:1146
const auto & index_range_scan() const
Definition: access_path.h:577
struct AccessPath::@55::@59 ref_or_null
const auto & ref() const
Definition: access_path.h:513
bool forced_by_dbug
Whether this access path is forced preferred over all others by means of a SET DEBUG force_subplan_0x...
Definition: access_path.h:302
Item_func_match * ft_func
Definition: access_path.h:896
struct AccessPath::@55::@79 hash_join
ha_rows * send_records_override
Definition: access_path.h:1139
struct AccessPath::@55::@77 zero_rows
auto & table_scan()
Definition: access_path.h:493
const auto & hash_join() const
Definition: access_path.h:681
QEP_TAB * qep_tab
Definition: access_path.h:1014
Table_function * table_function
Definition: access_path.h:1018
auto & zero_rows_aggregated()
Definition: access_path.h:669
const auto & sort() const
Definition: access_path.h:721
bool unwrap_rollup
Definition: access_path.h:1117
struct AccessPath::@55::@93 weedout
double init_once_cost
Of init_cost, how much of the initialization needs only to be done once per query block.
Definition: access_path.h:382
Type
Definition: access_path.h:207
@ FOLLOW_TAIL
Definition: access_path.h:220
@ FILTER
Definition: access_path.h:244
@ PUSHED_JOIN_REF
Definition: access_path.h:216
@ ZERO_ROWS_AGGREGATED
Definition: access_path.h:233
@ UPDATE_ROWS
Definition: access_path.h:262
@ AGGREGATE
Definition: access_path.h:246
@ BKA_JOIN
Definition: access_path.h:240
@ ZERO_ROWS
Definition: access_path.h:232
@ CONST_TABLE
Definition: access_path.h:218
@ GROUP_INDEX_SKIP_SCAN
Definition: access_path.h:226
@ INDEX_RANGE_SCAN
Definition: access_path.h:221
@ UNQUALIFIED_COUNT
Definition: access_path.h:235
@ EQ_REF
Definition: access_path.h:215
@ FAKE_SINGLE_ROW
Definition: access_path.h:231
@ MATERIALIZE_INFORMATION_SCHEMA_TABLE
Definition: access_path.h:251
@ WINDOW
Definition: access_path.h:253
@ REF_OR_NULL
Definition: access_path.h:214
@ MATERIALIZE
Definition: access_path.h:250
@ NESTED_LOOP_SEMIJOIN_WITH_DUPLICATE_REMOVAL
Definition: access_path.h:239
@ ROWID_UNION
Definition: access_path.h:224
@ INDEX_SKIP_SCAN
Definition: access_path.h:225
@ MRR
Definition: access_path.h:219
@ CACHE_INVALIDATOR
Definition: access_path.h:258
@ INDEX_SCAN
Definition: access_path.h:212
@ TABLE_VALUE_CONSTRUCTOR
Definition: access_path.h:230
@ WEEDOUT
Definition: access_path.h:254
@ MATERIALIZED_TABLE_FUNCTION
Definition: access_path.h:234
@ REMOVE_DUPLICATES_ON_INDEX
Definition: access_path.h:256
@ TABLE_SCAN
Definition: access_path.h:211
@ REF
Definition: access_path.h:213
@ TEMPTABLE_AGGREGATE
Definition: access_path.h:247
@ LIMIT_OFFSET
Definition: access_path.h:248
@ APPEND
Definition: access_path.h:252
@ NESTED_LOOP_JOIN
Definition: access_path.h:238
@ INDEX_MERGE
Definition: access_path.h:222
@ FULL_TEXT_SEARCH
Definition: access_path.h:217
@ ALTERNATIVE
Definition: access_path.h:257
@ STREAM
Definition: access_path.h:249
@ REMOVE_DUPLICATES
Definition: access_path.h:255
@ ROWID_INTERSECTION
Definition: access_path.h:223
@ DYNAMIC_INDEX_RANGE_SCAN
Definition: access_path.h:227
@ DELETE_ROWS
Definition: access_path.h:261
@ SORT
Definition: access_path.h:245
@ HASH_JOIN
Definition: access_path.h:241
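The enum discriminates the union below; code typically switches on it and then calls the matching accessor. A sketch (the helper name is ours; GetBasicTable() above does this exhaustively):
  // Sketch: fetch the TABLE from a handful of leaf path types.
  static const TABLE *LeafTable(const AccessPath &path) {
    switch (path.type) {
      case AccessPath::TABLE_SCAN:
        return path.table_scan().table;
      case AccessPath::INDEX_SCAN:
        return path.index_scan().table;
      case AccessPath::REF:
        return path.ref().table;
      default:
        return nullptr;  // Not a basic single-table path.
    }
  }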
bool retrieve_full_rows
Definition: access_path.h:969
const auto & rowid_union() const
Definition: access_path.h:601
const TABLE * table
Definition: access_path.h:1087
const auto & index_skip_scan() const
Definition: access_path.h:609
auto & dynamic_index_range_scan()
Definition: access_path.h:621
const auto & pushed_join_ref() const
Definition: access_path.h:537
struct AccessPath::@55::@72 dynamic_index_range_scan
auto & remove_duplicates_on_index()
Definition: access_path.h:805
table_map immediate_tables
Definition: access_path.h:1207
struct AccessPath::@55::@64 mrr
int mrr_flags
Definition: access_path.h:906
auto & full_text_search()
Definition: access_path.h:541
const auto & materialize() const
Definition: access_path.h:761
int ordering_state
Which ordering the rows produced by this path follow, if any (see interesting_orders.h).
Definition: access_path.h:352
auto & eq_ref()
Definition: access_path.h:525
JoinType join_type
Definition: access_path.h:1059
struct AccessPath::@55::@84 sort
struct AccessPath::@55::@66 index_range_scan
struct AccessPath::@55::@97 cache_invalidator
const auto & limit_offset() const
Definition: access_path.h:745
MaterializePathParameters * param
Definition: access_path.h:1157
const char * cause
Definition: access_path.h:1041
struct AccessPath::@55::@98 delete_rows
auto & update_rows()
Definition: access_path.h:837
auto & index_merge()
Definition: access_path.h:581
auto & aggregate()
Definition: access_path.h:725
struct AccessPath::@55::@62 full_text_search
const auto & group_index_skip_scan() const
Definition: access_path.h:617
auto & zero_rows()
Definition: access_path.h:661
AccessPath * subquery_path
Definition: access_path.h:1125
Mem_root_array< AppendPathParameters > * children
Definition: access_path.h:1167
double cost
Expected cost to read all of this access path once; -1.0 for unknown.
Definition: access_path.h:361
const auto & nested_loop_join() const
Definition: access_path.h:697
struct AccessPath::@55::@96 alternative
bool already_expanded_predicates
Definition: access_path.h:1069
const auto & remove_duplicates_on_index() const
Definition: access_path.h:809
struct AccessPath::@55::@67 index_merge
Temp_table_param * temp_table_param
Definition: access_path.h:1126
auto & nested_loop_semijoin_with_duplicate_removal()
Definition: access_path.h:701
bool reverse
Definition: access_path.h:868
AccessPath * table_scan_path
Definition: access_path.h:1194
const auto & materialize_information_schema_table() const
Definition: access_path.h:769
double subquery_cost
The total cost of executing the queries that we materialize.
Definition: access_path.h:1159
bool geometry
Definition: access_path.h:946
bool count_examined_rows
Whether this access path counts as one that scans a base table, and thus should be counted towards examined_rows.
Definition: access_path.h:297
unsigned loosescan_key_len
Definition: access_path.h:1191
auto & ref()
Definition: access_path.h:509
struct AccessPath::@55::@80 bka_join
struct AccessPath::@55::@82 nested_loop_semijoin_with_duplicate_removal
table_map tables_to_delete_from
Definition: access_path.h:1206
struct AccessPath::@55::@57 index_scan
IndexSkipScanParameters * param
Definition: access_path.h:1001
const OverflowBitset & applied_sargable_join_predicates() const
Definition: access_path.h:436
Mem_root_array< AccessPath * > * children
Definition: access_path.h:959
OverflowBitset delayed_predicates
Bitmap of WHERE predicates that touch tables we have joined in, but that we could not apply yet (for ...
Definition: access_path.h:445
AccessPath * table_path
Definition: access_path.h:1019
const auto & aggregate() const
Definition: access_path.h:729
TABLE * table
Definition: access_path.h:862
auto & append()
Definition: access_path.h:773
struct AccessPath::@55::@99 update_rows
const auto & weedout() const
Definition: access_path.h:793
const auto & cache_invalidator() const
Definition: access_path.h:825
bool needs_buffering
Definition: access_path.h:1175
int8_t immediate_update_delete_table
For UPDATE and DELETE statements: The node index of a table which can be updated or deleted from immediately as the rows are read from the iterator.
Definition: access_path.h:346
QUICK_RANGE ** ranges
Definition: access_path.h:921
bool is_unique
Definition: access_path.h:889
const auto & alternative() const
Definition: access_path.h:817
Safety
A general enum to describe the safety of a given operation.
Definition: access_path.h:272
@ SAFE_IF_SCANNED_ONCE
The given operation is safe if this access path is scanned once, but not if it's scanned multiple times.
Definition: access_path.h:281
@ UNSAFE
The given operation is unsafe on this access path, no matter how many or few times it's scanned.
Definition: access_path.h:287
@ SAFE
The given operation is always safe on this access path.
Definition: access_path.h:274
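A sketch of how a consumer might act on safe_for_rowid when deciding whether row IDs can be used for sorting (the helper name is ours):
  // Sketch: row IDs are usable if the path is SAFE, or SAFE_IF_SCANNED_ONCE
  // when we know the path will be scanned only a single time.
  static bool RowIdsUsable(const AccessPath &path, bool scanned_once) {
    switch (path.safe_for_rowid) {
      case AccessPath::SAFE:
        return true;
      case AccessPath::SAFE_IF_SCANNED_ONCE:
        return scanned_once;
      case AccessPath::UNSAFE:
        return false;
    }
    return false;  // Unreachable; keeps compilers happy.
  }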
auto & const_table()
Definition: access_path.h:549
struct AccessPath::@55::@75 table_value_constructor
unsigned mrr_length_per_rec
Definition: access_path.h:1060
struct AccessPath::@55::@65 follow_tail
const auto & rowid_intersection() const
Definition: access_path.h:593
auto & index_range_scan()
Definition: access_path.h:573
double num_output_rows() const
Definition: access_path.h:846
struct AccessPath::@55::@89 materialize
double num_output_rows_before_filter
If there is no filter, these are identical to num_output_rows and cost, respectively.
Definition: access_path.h:394
const auto & ref_or_null() const
Definition: access_path.h:521
struct AccessPath::@55::@91 append
Filesort * filesort
Definition: access_path.h:1108
bool store_rowids
Definition: access_path.h:1053
const auto & nested_loop_semijoin_with_duplicate_removal() const
Definition: access_path.h:705
struct AccessPath::@55::@56 table_scan
Definition: access_path.h:162
JOIN * join
Definition: access_path.h:164
AccessPath * path
Definition: access_path.h:163
Definition: group_index_skip_scan_plan.h:44
Logically a part of AccessPath::index_skip_scan(), but too large, so it is split out into its own struct.
Definition: index_skip_scan_plan.h:72
Structure used for index-based lookups.
Definition: sql_opt_exec_shared.h:66
A specification that two specific relational expressions (e.g., two tables, or a table and a join between two other tables) should be joined together.
Definition: access_path.h:75
int semijoin_group_size
Definition: access_path.h:108
FunctionalDependencySet functional_dependencies
Definition: access_path.h:92
Mem_root_array< int > functional_dependencies_idx
Definition: access_path.h:97
RelationalExpression * expr
Definition: access_path.h:76
double selectivity
Definition: access_path.h:77
int ordering_idx_needed_for_semijoin_rewrite
Definition: access_path.h:102
Item ** semijoin_group
Definition: access_path.h:107
size_t estimated_bytes_per_row
Definition: access_path.h:81
Definition: range_optimizer.h:54
The MEM_ROOT is a simple arena, where allocations are carved out of larger blocks.
Definition: my_alloc.h:82
Definition: materialize_path_parameters.h:41
int select_number
Definition: materialize_path_parameters.h:43
bool copy_items
Definition: materialize_path_parameters.h:46
Temp_table_param * temp_table_param
Definition: materialize_path_parameters.h:47
AccessPath * subquery_path
Definition: materialize_path_parameters.h:42
bool disable_deduplication_by_hash_field
Definition: materialize_path_parameters.h:45
JOIN * join
Definition: materialize_path_parameters.h:44
Definition: materialize_path_parameters.h:39
bool rematerialize
True if rematerializing on every Init() call (e.g., because we have a dependency on a value from outside the query block).
Definition: materialize_path_parameters.h:79
Common_table_expr * cte
If materializing a CTE, points to it (see m_cte), otherwise nullptr.
Definition: materialize_path_parameters.h:64
bool reject_multiple_rows
True if this is the top level iterator for a materialized derived table transformed from a scalar subquery which needs a run-time cardinality check.
Definition: materialize_path_parameters.h:98
ha_rows limit_rows
Used when pushing LIMIT down to MaterializeIterator; this is more efficient than having a LimitOffsetIterator on top of the MaterializeIterator.
Definition: materialize_path_parameters.h:91
Mem_root_array< QueryBlock > query_blocks
Definition: materialize_path_parameters.h:57
TABLE * table
Handle to table to materialize into.
Definition: materialize_path_parameters.h:61
int ref_slice
Definition: materialize_path_parameters.h:73
Query_expression * unit
The query expression we are materializing.
Definition: materialize_path_parameters.h:67
Mem_root_array< const AccessPath * > * invalidators
Definition: materialize_path_parameters.h:58
Definition: table.h:280
A position of a table within a join order.
Definition: sql_select.h:351
A filter of some sort that is not a join condition (those are stored in JoinPredicate objects).
Definition: access_path.h:116
hypergraph::NodeMap total_eligibility_set
Definition: access_path.h:129
bool was_join_condition
Definition: access_path.h:142
Mem_root_array< int > functional_dependencies_idx
Definition: access_path.h:154
FunctionalDependencySet functional_dependencies
Definition: access_path.h:153
int source_multiple_equality_idx
Definition: access_path.h:150
hypergraph::NodeMap used_nodes
Definition: access_path.h:120
Item * condition
Definition: access_path.h:117
double selectivity
Definition: access_path.h:131
Mem_root_array< ContainedSubquery > contained_subqueries
Definition: access_path.h:159
Represents an expression tree in the relational algebra of joins.
Definition: relational_expression.h:80
Definition: table.h:1395
bool is_union_or_table() const
Test if this tmp table stores the result of a UNION set operation or a single table.
Definition: table.h:1547