MySQL 9.7.0
Source Code Documentation
composite_iterators.h
Go to the documentation of this file.
1#ifndef SQL_ITERATORS_COMPOSITE_ITERATORS_H_
2#define SQL_ITERATORS_COMPOSITE_ITERATORS_H_
3
4/* Copyright (c) 2018, 2026, Oracle and/or its affiliates.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License, version 2.0,
8 as published by the Free Software Foundation.
9
10 This program is designed to work with certain software (including
11 but not limited to OpenSSL) that is licensed under separate terms,
12 as designated in a particular file or component or in included license
13 documentation. The authors of MySQL hereby grant you an additional
14 permission to link the program and your derivative works with the
15 separately licensed software that they have either included with
16 the program or referenced in the documentation.
17
18 This program is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License, version 2.0, for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
26
27/**
28 @file composite_iterators.h
29
30 A composite row iterator is one that takes in one or more existing iterators
31 and processes their rows in some interesting way. They are usually not bound
32 to a single table or similar, but are the inner (non-leaf) nodes of the
33 iterator execution tree. They consistently own their source iterator, although
34 not its memory (since we never allocate row iterators on the heap--usually on
 35 a MEM_ROOT). This means that in the end, you'll end up with a single root
36 iterator which then owns everything else recursively.
37
38 SortingIterator and the two window iterators are also composite iterators,
39 but are defined in their own files.
40 */
41
42#include <assert.h>
43#include <stddef.h>
44#include <stdint.h>
45#include <sys/types.h>
46#include <memory>
47#include <span>
48#include <string>
49#include <utility>
50#include <vector>
51
52#include "my_alloc.h"
53#include "my_base.h"
54#include "my_inttypes.h"
55#include "my_table_map.h"
58#include "sql/join_type.h"
59#include "sql/mem_root_array.h"
60#include "sql/pack_rows.h"
61#include "sql/sql_array.h"
62#include "sql_string.h"
63
64class Cached_item;
66class Item;
67class JOIN;
68class KEY;
70class SJ_TMP_TABLE;
71class Table_ref;
72class THD;
73class Table_function;
75struct TABLE;
76
77/**
78 An iterator that takes in a stream of rows and passes through only those that
79 meet some criteria (i.e., a condition evaluates to true). This is typically
80 used for WHERE/HAVING.
81 */
82class FilterIterator final : public RowIterator {
83 public:
85 Item *condition)
86 : RowIterator(thd), m_source(std::move(source)), m_condition(condition) {}
87
88 void SetNullRowFlag(bool is_null_row) override {
89 m_source->SetNullRowFlag(is_null_row);
90 }
91
92 void StartPSIBatchMode() override { m_source->StartPSIBatchMode(); }
93 void EndPSIBatchModeIfStarted() override {
94 m_source->EndPSIBatchModeIfStarted();
95 }
96 void UnlockRow() override { m_source->UnlockRow(); }
97
98 private:
99 bool DoInit() override;
100 int DoRead() override;
101
104 bool m_no_rows{false};
105};
106
107/**
108 Handles LIMIT and/or OFFSET; Init() eats the first "offset" rows, and Read()
109 stops as soon as it's seen "limit" rows (including any skipped by offset).
110 */
111class LimitOffsetIterator final : public RowIterator {
112 public:
113 /**
114 @param thd Thread context
115 @param source Row source
116 @param limit Maximum number of rows to read, including the ones skipped by
117 offset. Can be HA_POS_ERROR for no limit.
118 @param offset Number of initial rows to skip. Can be 0 for no offset.
119 @param count_all_rows If true, the query will run to completion to get
120 more accurate numbers for skipped_rows, so you will not get any
121 performance benefits of early end.
122 @param reject_multiple_rows True if a derived table transformed from a
123 scalar subquery needs a run-time cardinality check
124 @param skipped_rows If not nullptr, is incremented for each row skipped by
125 offset or limit.
126 */
128 ha_rows limit, ha_rows offset, bool count_all_rows,
129 bool reject_multiple_rows, ha_rows *skipped_rows)
130 : RowIterator(thd),
131 m_source(std::move(source)),
132 m_limit(limit),
133 m_offset(offset),
134 m_count_all_rows(count_all_rows),
135 m_reject_multiple_rows(reject_multiple_rows),
136 m_skipped_rows(skipped_rows) {
137 if (count_all_rows) {
138 assert(m_skipped_rows != nullptr);
139 }
140 }
141
142 void SetNullRowFlag(bool is_null_row) override {
143 m_source->SetNullRowFlag(is_null_row);
144 }
145
146 void StartPSIBatchMode() override { m_source->StartPSIBatchMode(); }
147 void EndPSIBatchModeIfStarted() override {
148 m_source->EndPSIBatchModeIfStarted();
149 }
150 void UnlockRow() override { m_source->UnlockRow(); }
151
152 private:
153 bool DoInit() override;
154 int DoRead() override;
155
157
158 // Note: The number of seen rows starts off at m_limit if we have OFFSET,
159 // which means we don't need separate LIMIT and OFFSET tests on the
160 // fast path of Read().
162
163 /**
164 Whether we have OFFSET rows that we still need to skip.
165 */
167
172};
173
174/**
175 Handles aggregation (typically used for GROUP BY) for the case where the rows
176 are already properly grouped coming in, ie., all rows that are supposed to be
177 part of the same group are adjacent in the input stream. (This could be
178 because they were sorted earlier, because we are scanning an index that
179 already gives us the rows in a group-compatible order, or because there is no
180 grouping.)
181
182 AggregateIterator needs to be able to save and restore rows; it doesn't know
183 when a group ends until it's seen the first row that is part of the _next_
184 group. When that happens, it needs to tuck away that next row, and then
185 restore the previous row so that the output row gets the correct grouped
186 values. A simple example, doing SELECT a, SUM(b) FROM t1 GROUP BY a:
187
188 t1.a t1.b SUM(b)
189 1 1 <-- first row, save it 1
190 1 2 3
191 1 3 6
192 2 1 <-- group changed, save row
193 [1 1] <-- restore first row, output 6
194 reset aggregate --> 0
195 [2 1] <-- restore new row, process it 1
196 2 10 11
197 <-- EOF, output 11
198
199 To save and restore rows like this, it uses the infrastructure from
200 pack_rows.h to pack and unpack all relevant rows into record[0] of every input
201 table. (Currently, there can only be one input table, but this may very well
202 change in the future.) It would be nice to have a more abstract concept of
203 sending a row around and taking copies of it if needed, as opposed to it
204 implicitly staying in the table's buffer. (This would also solve some
205 issues in EQRefIterator and when synthesizing NULL rows for outer joins.)
206 However, that's a large refactoring.
207 */
208class AggregateIterator final : public RowIterator {
209 public:
212 std::span<AccessPath *> single_row_index_lookups,
213 bool rollup);
214
215 void SetNullRowFlag(bool is_null_row) override {
216 m_source->SetNullRowFlag(is_null_row);
217 }
218
219 void StartPSIBatchMode() override { m_source->StartPSIBatchMode(); }
220 void EndPSIBatchModeIfStarted() override {
221 m_source->EndPSIBatchModeIfStarted();
222 }
223 void UnlockRow() override {
224 // Most likely, HAVING failed. Ideally, we'd like to backtrack and
225 // unlock all rows that went into this aggregate, but we can't do that,
226 // and we also can't unlock the _current_ row, since that belongs to a
227 // different group. Thus, do nothing.
228 }
229
230 private:
231 bool DoInit() override;
232 int DoRead() override;
233
234 enum {
240
242
243 /**
244 The join we are part of. It would be nicer not to rely on this,
245 but we need a large number of members from there, like which
246 aggregate functions we have, the THD, temporary table parameters
247 and so on.
248 */
249 JOIN *m_join = nullptr;
250
251 /// Whether we have seen the last input row.
253
254 /**
255 Used to save NULL information in the specific case where we have
256 zero input rows.
257 */
259
260 /// Whether this is a rollup query.
261 const bool m_rollup;
262
263 /**
264 For rollup: The index of the first group item that did _not_ change when we
265 last switched groups. E.g., if we have group fields A,B,C,D and then switch
266 to group A,B,E,D, this value will become 1 (which means that we need
267 to output rollup rows for 2 -- A,B,E,NULL -- and then 1 -- A,B,NULL,NULL).
268 m_current_rollup_position will count down from the end until it becomes
269 less than this value.
270
271 If we do not have rollup, this value is perennially zero.
272 */
274
275 /**
276 If we are in state OUTPUTTING_ROLLUP_ROWS, where we are in the iteration.
277 This value will start at the index of the last group expression and then
278 count backwards down to and including m_last_unchanged_group_item_idx.
279 It is used to communicate to the rollup group items whether to turn
280 themselves into NULLs, and the sum items which of their sums to output.
281 */
283
284 /**
285 The list of tables we are reading from; they are the ones for which we need
286 to save and restore rows.
287 */
289
290 /// Packed version of the first row in the group we are currently processing.
292
293 /**
294 If applicable, packed version of the first row in the _next_ group. This is
295 used only in the LAST_ROW_STARTED_NEW_GROUP state; we just saw a row that
296 didn't belong to the current group, so we saved it here and went to output
297 a group. On the next Read() call, we need to process this deferred row
298 first of all.
299
300 Even when not in use, this string contains a buffer that is large enough to
301 pack a full row into, sans blobs. (If blobs are present,
302 StoreFromTableBuffers() will automatically allocate more space if needed.)
303 */
305
306 /// All the single-row index lookups that provide rows to this iterator.
307 std::span<AccessPath *> m_single_row_index_lookups;
308
309 /**
310 The slice we're setting when returning rows. See the comment in the
311 constructor.
312 */
314
315 void SetRollupLevel(int level);
316};
317
318/**
319 A simple nested loop join, taking in two iterators (left/outer and
320 right/inner) and joining them together. This may, of course, scan the inner
321 iterator many times. It is currently the only form of join we have.
322
323 The iterator works as a state machine, where the state records whether we need
324 to read a new outer row or not, and whether we've seen any rows from the inner
 325 iterator at all (if not, an outer join needs to synthesize a new NULL row).
326
327 The iterator takes care of activating performance schema batch mode on the
328 right iterator if needed; this is typically only used if it is the innermost
 329 table in the entire join (where the gains from turning on batch mode are the
 330 largest, and the accuracy loss from turning it off is the least critical).
331 */
332class NestedLoopIterator final : public RowIterator {
333 public:
337 JoinType join_type, bool pfs_batch_mode)
338 : RowIterator(thd),
339 m_source_outer(std::move(source_outer)),
340 m_source_inner(std::move(source_inner)),
342 m_pfs_batch_mode(pfs_batch_mode) {
343 assert(m_source_outer != nullptr);
344 assert(m_source_inner != nullptr);
345
346 // Batch mode makes no sense for anti- or semijoins, since they should only
347 // be reading one row.
349 assert(!pfs_batch_mode);
350 }
351 }
352
353 void SetNullRowFlag(bool is_null_row) override {
354 // TODO: write something here about why we can't do this lazily.
355 m_source_outer->SetNullRowFlag(is_null_row);
356 m_source_inner->SetNullRowFlag(is_null_row);
357 }
358
359 void EndPSIBatchModeIfStarted() override {
360 m_source_outer->EndPSIBatchModeIfStarted();
361 m_source_inner->EndPSIBatchModeIfStarted();
362 }
363
364 void UnlockRow() override {
365 // Since we don't know which condition that caused the row to be rejected,
366 // we can't know whether we could also unlock the outer row
367 // (it may still be used as parts of other joined rows).
369 m_source_inner->UnlockRow();
370 }
371 }
372
373 private:
374 bool DoInit() override;
375 int DoRead() override;
376
377 enum {
383
387
388 /** Whether to use batch mode when scanning the inner iterator. */
390};
391
392/**
393 An iterator that helps invalidating caches. Every time a row passes through it
394 or it changes state in any other way, it increments its “generation” counter.
395 This allows MaterializeIterator to see whether any of its dependencies has
396 changed, and then force a rematerialization -- this is typically used for
397 LATERAL tables, where we're joining in a derived table that depends on
398 something earlier in the join.
399 */
401 public:
404 const std::string &name)
405 : RowIterator(thd),
406 m_source_iterator(std::move(source_iterator)),
407 m_name(name) {}
408
409 private:
410 bool DoInit() override {
411 ++m_generation;
412 return m_source_iterator->Init();
413 }
414
415 int DoRead() override {
416 ++m_generation;
417 return m_source_iterator->Read();
418 }
419
420 public:
421 void SetNullRowFlag(bool is_null_row) override {
422 ++m_generation;
423 m_source_iterator->SetNullRowFlag(is_null_row);
424 }
425
426 void UnlockRow() override { m_source_iterator->UnlockRow(); }
427
428 int64_t generation() const { return m_generation; }
429 std::string name() const { return m_name; }
430
431 private:
433 int64_t m_generation = 0;
434 std::string m_name;
435};
436
438/**
439 An operand (query block) to be materialized by MaterializeIterator.
440 (@see MaterializeIterator for details.)
441*/
442struct Operand {
443 /// The iterator to read the actual rows from.
445
446 /// Used only for optimizer trace.
448
449 /// The JOIN that this query block represents. Used for performance
450 /// schema batch mode: When materializing a query block that consists of
451 /// a single table, MaterializeIterator needs to set up schema batch mode,
452 /// since there is no nested loop iterator to do it. (This is similar to
453 /// what ExecuteIteratorQuery() needs to do at the top level.)
455
456 /// If true, de-duplication checking via hash key is disabled
457 /// when materializing this query block (ie., we simply avoid calling
458 /// check_unique_fields() for each row). Used when materializing
459 /// UNION DISTINCT and UNION ALL parts into the same table.
460 /// We'd like to just use a unique constraint via unique index instead,
461 /// but there might be other indexes on the destination table
462 /// that we'd like to keep, and the implementation doesn't allow
463 /// disabling only one index.
464 ///
465 /// If you use this on a query block, doing_hash_deduplication()
466 /// must be true.
468
469 /// If set to false, the Field objects in the output row are
470 /// presumed already to be filled out. This is the case iff
471 /// there's a windowing iterator earlier in the chain.
473
474 /// The number of operands (i.e. blocks) involved in the set operation:
475 /// used for INTERSECT to determine if a value is present in all operands
477 /// The current operand (i.e. block) number, starting at zero. We use this
478 /// for INTERSECT and EXCEPT materialization operand.
480 /// Used for EXCEPT computation: the index of the first operand involved in
481 /// a N-ary except operation which has DISTINCT. This is significant for
482 /// calculating whether to set the counter to zero or just decrement it
483 /// when we see a right side operand.
485
486 /// If copy_items is true, used for copying the Field objects
487 /// into the temporary table row. Otherwise unused.
489
490 // Whether this query block is a recursive reference back to the
491 // output of the materialization.
493
494 // If is_recursive_reference is true, contains the FollowTailIterator
495 // in the query block (there can be at most one recursive reference
496 // in a join list, as per the SQL standard, so there should be exactly one).
497 // Used for informing the iterators about various shared state in the
498 // materialization (including coordinating rematerializations).
500
501 /// The estimated number of rows produced by this block
503};
504
505/**
 506 Create an iterator that materializes a set of rows into a temporary table
507 and sets up a (pre-existing) iterator to access that.
508 @see MaterializeIterator.
509
510 @param thd Thread handler.
511 @param operands List of operands (query blocks) to materialize.
512 @param path_params MaterializePath settings.
513 @param table_iterator Iterator used for accessing the temporary table
514 after materialization.
515 @param join
516 When materializing within the same JOIN (e.g., into a temporary table
517 before sorting), as opposed to a derived table or a CTE, we may need
518 to change the slice on the join before returning rows from the result
519 table. If so, join and ref_slice would need to be set, and
520 query_blocks_to_materialize should contain only one member, with the same
521 join.
522 @return the iterator.
523*/
526 const MaterializePathParameters *path_params,
528
529} // namespace materialize_iterator
530
532/**
533 Create an iterator that aggregates the output rows from another iterator
534 into a temporary table and then sets up a (pre-existing) iterator to
535 access the temporary table.
536 @see TemptableAggregateIterator.
537
538 @param thd Thread handler.
539 @param subquery_iterator input to aggregation.
540 @param temp_table_param temporary table settings.
541 @param table_iterator Iterator used for scanning the temporary table
542 after materialization.
543 @param table the temporary table.
544 @param join the JOIN in which we aggregate.
545 @param ref_slice the slice to set when accessing temporary table;
546 used if anything upstream wants to evaluate values based on its contents.
547 @return the iterator.
548*/
550 THD *thd, unique_ptr_destroy_only<RowIterator> subquery_iterator,
551 Temp_table_param *temp_table_param, TABLE *table,
553 int ref_slice);
554
555} // namespace temptable_aggregate_iterator
556
557/**
558 StreamingIterator is a minimal version of MaterializeIterator that does not
559 actually materialize; instead, every Read() just forwards the call to the
560 subquery iterator and does the required copying from one set of fields to
561 another.
562
563 It is used for when the optimizer would normally set up a materialization,
564 but you don't actually need one, ie. you don't want to read the rows multiple
565 times after writing them, and you don't want to access them by index (only
566 a single table scan). It also takes care of setting the NULL row flag
567 on the temporary table.
568 */
570 public:
571 /**
572 @param thd Thread handle.
573 @param subquery_iterator The iterator to read rows from.
574 @param temp_table_param Parameters for the temp table.
575 @param table The table we are streaming through. Will never actually
576 be written to, but its fields will be used.
577 @param provide_rowid If true, generate a row ID for each row we stream.
578 This is used if the parent needs row IDs for deduplication, in particular
579 weedout.
580 @param join See MaterializeIterator.
581 @param ref_slice See MaterializeIterator.
582 */
584 unique_ptr_destroy_only<RowIterator> subquery_iterator,
585 Temp_table_param *temp_table_param, TABLE *table,
586 bool provide_rowid, JOIN *join, int ref_slice);
587
588 void StartPSIBatchMode() override {
589 m_subquery_iterator->StartPSIBatchMode();
590 }
591 void EndPSIBatchModeIfStarted() override {
592 m_subquery_iterator->EndPSIBatchModeIfStarted();
593 }
594 void UnlockRow() override { m_subquery_iterator->UnlockRow(); }
595
596 private:
597 bool DoInit() override;
598 int DoRead() override;
602 JOIN *const m_join;
603 const int m_output_slice;
605
606 // Whether the iterator should generate and provide a row ID. Only true if the
607 // iterator is part of weedout, where the iterator will create a fake row ID
608 // to uniquely identify the rows it produces.
609 const bool m_provide_rowid;
610};
611
612/**
613 An iterator that wraps a Table_function (e.g. JSON_TABLE) and allows you to
614 iterate over the materialized temporary table. The table is materialized anew
615 for every Init().
616
617 TODO: Just wrapping it is probably not the optimal thing to do;
618 Table_function is highly oriented around materialization, but never caches.
619 Thus, perhaps we should rewrite Table_function to return a RowIterator
620 instead of going through a temporary table.
621 */
623 public:
625 THD *thd, Table_function *table_function, TABLE *table,
627
628 void SetNullRowFlag(bool is_null_row) override {
629 m_table_iterator->SetNullRowFlag(is_null_row);
630 }
631
632 void StartPSIBatchMode() override { m_table_iterator->StartPSIBatchMode(); }
633 void EndPSIBatchModeIfStarted() override {
634 m_table_iterator->EndPSIBatchModeIfStarted();
635 }
636
637 // The temporary table is private to us, so there's no need to worry about
638 // locks to other transactions.
639 void UnlockRow() override {}
640
641 private:
642 bool DoInit() override;
643 int DoRead() override { return m_table_iterator->Read(); }
644
646
648};
649
650/**
651 Like semijoin materialization, weedout works on the basic idea that a semijoin
 652 is just like an inner join as long as we can get rid of the duplicates
653 somehow. (This is advantageous, because inner joins can be reordered, whereas
654 semijoins generally can't.) However, unlike semijoin materialization, weedout
655 removes duplicates after the join, not before it. Consider something like
656
657 SELECT * FROM t1 WHERE a IN ( SELECT b FROM t2 );
658
659 Semijoin materialization solves this by materializing t2, with deduplication,
660 and then joining. Weedout joins t1 to t2 and then leaves only one output row
661 per t1 row. The disadvantage is that this potentially needs to discard more
662 rows; the (potential) advantage is that we deduplicate on t1 instead of t2.
663
664 Weedout, unlike materialization, works in a streaming fashion; rows are output
665 (or discarded) as they come in, with a temporary table used for recording the
666 row IDs we've seen before. (We need to deduplicate on t1's row IDs, not its
667 contents.) See SJ_TMP_TABLE for details about the table format.
668 */
669class WeedoutIterator final : public RowIterator {
670 public:
672 SJ_TMP_TABLE *sj, table_map tables_to_get_rowid_for);
673
674 void SetNullRowFlag(bool is_null_row) override {
675 m_source->SetNullRowFlag(is_null_row);
676 }
677
678 void EndPSIBatchModeIfStarted() override {
679 m_source->EndPSIBatchModeIfStarted();
680 }
681 void UnlockRow() override { m_source->UnlockRow(); }
682
683 private:
684 bool DoInit() override;
685 int DoRead() override;
689};
690
691/**
692 An iterator that removes consecutive rows that are the same according to
693 a set of items (typically the join key), so-called “loose scan”
694 (not to be confused with “loose index scan”, which is made by the
695 range optimizer). This is similar in spirit to WeedoutIterator above
696 (removing duplicates allows us to treat the semijoin as a normal join),
697 but is much cheaper if the data is already ordered/grouped correctly,
698 as the removal can happen before the join, and it does not need a
699 temporary table.
700 */
702 public:
705 JOIN *join, std::span<Item *> group_items);
706
707 void SetNullRowFlag(bool is_null_row) override {
708 m_source->SetNullRowFlag(is_null_row);
709 }
710
711 void StartPSIBatchMode() override { m_source->StartPSIBatchMode(); }
712 void EndPSIBatchModeIfStarted() override {
713 m_source->EndPSIBatchModeIfStarted();
714 }
715 void UnlockRow() override { m_source->UnlockRow(); }
716
717 private:
718 bool DoInit() override;
719 int DoRead() override;
723};
724
725/**
726 Much like RemoveDuplicatesIterator, but works on the basis of a given index
727 (or more accurately, its keypart), not an arbitrary list of grouped fields.
728 This is only used in the non-hypergraph optimizer; the hypergraph optimizer
729 can deal with groupings that come from e.g. sorts.
730 */
732 public:
735 const TABLE *table, KEY *key, size_t key_len);
736
737 void SetNullRowFlag(bool is_null_row) override {
738 m_source->SetNullRowFlag(is_null_row);
739 }
740
741 void StartPSIBatchMode() override { m_source->StartPSIBatchMode(); }
742 void EndPSIBatchModeIfStarted() override {
743 m_source->EndPSIBatchModeIfStarted();
744 }
745 void UnlockRow() override { m_source->UnlockRow(); }
746
747 private:
748 bool DoInit() override;
749 int DoRead() override;
750
754 uchar *m_key_buf; // Owned by the THD's MEM_ROOT.
755 const size_t m_key_len;
757};
758
759/**
760 An iterator that is semantically equivalent to a semijoin NestedLoopIterator
761 immediately followed by a RemoveDuplicatesOnIndexIterator. It is used to
762 implement the “loose scan” strategy in queries with multiple tables on the
763 inside of a semijoin, like
764
765 ... FROM t1 WHERE ... IN ( SELECT ... FROM t2 JOIN t3 ... )
766
767 In this case, the query tree without this iterator would ostensibly look like
768
769 -> Nested loop join
770 -> Table scan on t1
771 -> Remove duplicates on t2_idx
772 -> Nested loop semijoin
773 -> Index scan on t2 using t2_idx
774 -> Filter (e.g. t3.a = t2.a)
775 -> Table scan on t3
776
777 (t3 will be marked as “first match” on t2 when implementing loose scan,
778 thus the semijoin.)
779
780 First note that we can't put the duplicate removal directly on t2 in this
781 case, as the first t2 row doesn't necessarily match anything in t3, so it
782 needs to be above. However, this is wasteful, because once we find a matching
783 t2/t3 pair, we should stop scanning t3 until we have a new t2.
784
785 NestedLoopSemiJoinWithDuplicateRemovalIterator solves the problem by doing
786 exactly this; it gets a row from the outer side, gets exactly one row from the
787 inner side, and then skips over rows from the outer side (_without_ scanning
788 the inner side) until its keypart changes.
789 */
791 : public RowIterator {
792 public:
796 KEY *key, size_t key_len);
797
798 void SetNullRowFlag(bool is_null_row) override {
799 m_source_outer->SetNullRowFlag(is_null_row);
800 m_source_inner->SetNullRowFlag(is_null_row);
801 }
802
803 void EndPSIBatchModeIfStarted() override {
804 m_source_outer->EndPSIBatchModeIfStarted();
805 m_source_inner->EndPSIBatchModeIfStarted();
806 }
807
808 void UnlockRow() override {
809 m_source_outer->UnlockRow();
810 m_source_inner->UnlockRow();
811 }
812
813 private:
814 bool DoInit() override;
815 int DoRead() override;
816
819
822 uchar *m_key_buf; // Owned by the THD's MEM_ROOT.
823 const size_t m_key_len;
825};
826
827/**
828 MaterializeInformationSchemaTableIterator makes sure a given I_S temporary
829 table is materialized (filled out) before we try to scan it.
830 */
832 public:
835 Table_ref *table_list, Item *condition);
836
837 void SetNullRowFlag(bool is_null_row) override {
838 m_table_iterator->SetNullRowFlag(is_null_row);
839 }
840
841 void StartPSIBatchMode() override { m_table_iterator->StartPSIBatchMode(); }
842 void EndPSIBatchModeIfStarted() override {
843 m_table_iterator->EndPSIBatchModeIfStarted();
844 }
845
846 // The temporary table is private to us, so there's no need to worry about
847 // locks to other transactions.
848 void UnlockRow() override {}
849
850 private:
851 bool DoInit() override;
852 int DoRead() override { return m_table_iterator->Read(); }
853
854 /// The iterator that reads from the materialized table.
858};
859
860/**
 861 Takes in two or more iterators and outputs rows from them sequentially
 862 (first all rows from the first one, then all from the second one, etc.).
863 Used for implementing UNION ALL, typically together with StreamingIterator.
864 */
865class AppendIterator final : public RowIterator {
866 public:
868 THD *thd,
870
871 void StartPSIBatchMode() override;
872 void EndPSIBatchModeIfStarted() override;
873
874 void SetNullRowFlag(bool is_null_row) override;
875 void UnlockRow() override;
876
877 private:
878 bool DoInit() override;
879 int DoRead() override;
880
881 std::vector<unique_ptr_destroy_only<RowIterator>> m_sub_iterators;
884};
885
886#endif // SQL_ITERATORS_COMPOSITE_ITERATORS_H_
Handles aggregation (typically used for GROUP BY) for the case where the rows are already properly gr...
Definition: composite_iterators.h:208
void UnlockRow() override
Definition: composite_iterators.h:223
int m_current_rollup_position
If we are in state OUTPUTTING_ROLLUP_ROWS, where we are in the iteration.
Definition: composite_iterators.h:282
void StartPSIBatchMode() override
Start performance schema batch mode, if supported (otherwise ignored).
Definition: composite_iterators.h:219
JOIN * m_join
The join we are part of.
Definition: composite_iterators.h:249
bool m_seen_eof
Whether we have seen the last input row.
Definition: composite_iterators.h:252
int DoRead() override
Definition: composite_iterators.cc:287
AggregateIterator(THD *thd, unique_ptr_destroy_only< RowIterator > source, JOIN *join, pack_rows::TableCollection tables, std::span< AccessPath * > single_row_index_lookups, bool rollup)
Definition: composite_iterators.cc:223
bool DoInit() override
Definition: composite_iterators.cc:243
pack_rows::TableCollection m_tables
The list of tables we are reading from; they are the ones for which we need to save and restore rows.
Definition: composite_iterators.h:288
@ LAST_ROW_STARTED_NEW_GROUP
Definition: composite_iterators.h:236
@ READING_FIRST_ROW
Definition: composite_iterators.h:235
@ OUTPUTTING_ROLLUP_ROWS
Definition: composite_iterators.h:237
@ DONE_OUTPUTTING_ROWS
Definition: composite_iterators.h:238
String m_first_row_this_group
Packed version of the first row in the group we are currently processing.
Definition: composite_iterators.h:291
String m_first_row_next_group
If applicable, packed version of the first row in the next group.
Definition: composite_iterators.h:304
table_map m_save_nullinfo
Used to save NULL information in the specific case where we have zero input rows.
Definition: composite_iterators.h:258
enum AggregateIterator::@65 m_state
unique_ptr_destroy_only< RowIterator > m_source
Definition: composite_iterators.h:241
int m_output_slice
The slice we're setting when returning rows.
Definition: composite_iterators.h:313
void EndPSIBatchModeIfStarted() override
Ends performance schema batch mode, if started.
Definition: composite_iterators.h:220
const bool m_rollup
Whether this is a rollup query.
Definition: composite_iterators.h:261
std::span< AccessPath * > m_single_row_index_lookups
All the single-row index lookups that provide rows to this iterator.
Definition: composite_iterators.h:307
void SetNullRowFlag(bool is_null_row) override
Mark the current row buffer as containing a NULL row or not, so that if you read from it and the flag...
Definition: composite_iterators.h:215
void SetRollupLevel(int level)
Definition: composite_iterators.cc:503
int m_last_unchanged_group_item_idx
For rollup: The index of the first group item that did not change when we last switched groups.
Definition: composite_iterators.h:273
Takes in two or more iterators and output rows from them sequentially (first all rows from the first ...
Definition: composite_iterators.h:865
size_t m_current_iterator_index
Definition: composite_iterators.h:882
AppendIterator(THD *thd, std::vector< unique_ptr_destroy_only< RowIterator > > &&sub_iterators)
Definition: composite_iterators.cc:4526
void SetNullRowFlag(bool is_null_row) override
Mark the current row buffer as containing a NULL row or not, so that if you read from it and the flag...
Definition: composite_iterators.cc:4563
int DoRead() override
Definition: composite_iterators.cc:4538
bool DoInit() override
Definition: composite_iterators.cc:4532
void EndPSIBatchModeIfStarted() override
Ends performance schema batch mode, if started.
Definition: composite_iterators.cc:4573
void UnlockRow() override
Definition: composite_iterators.cc:4581
std::vector< unique_ptr_destroy_only< RowIterator > > m_sub_iterators
Definition: composite_iterators.h:881
bool m_pfs_batch_mode_enabled
Definition: composite_iterators.h:883
void StartPSIBatchMode() override
Start performance schema batch mode, if supported (otherwise ignored).
Definition: composite_iterators.cc:4568
A wrapper class which provides array bounds checking.
Definition: sql_array.h:48
An iterator that helps invalidating caches.
Definition: composite_iterators.h:400
CacheInvalidatorIterator(THD *thd, unique_ptr_destroy_only< RowIterator > source_iterator, const std::string &name)
Definition: composite_iterators.h:402
int DoRead() override
Definition: composite_iterators.h:415
std::string m_name
Definition: composite_iterators.h:434
void SetNullRowFlag(bool is_null_row) override
Mark the current row buffer as containing a NULL row or not, so that if you read from it and the flag...
Definition: composite_iterators.h:421
unique_ptr_destroy_only< RowIterator > m_source_iterator
Definition: composite_iterators.h:432
void UnlockRow() override
Definition: composite_iterators.h:426
bool DoInit() override
Definition: composite_iterators.h:410
int64_t m_generation
Definition: composite_iterators.h:433
std::string name() const
Definition: composite_iterators.h:429
int64_t generation() const
Definition: composite_iterators.h:428
This is used for segregating rows in groups (e.g.
Definition: item.h:6649
An iterator that takes in a stream of rows and passes through only those that meet some criteria (i....
Definition: composite_iterators.h:82
void EndPSIBatchModeIfStarted() override
Ends performance schema batch mode, if started.
Definition: composite_iterators.h:93
void UnlockRow() override
Definition: composite_iterators.h:96
int DoRead() override
Definition: composite_iterators.cc:120
unique_ptr_destroy_only< RowIterator > m_source
Definition: composite_iterators.h:102
bool m_no_rows
Definition: composite_iterators.h:104
FilterIterator(THD *thd, unique_ptr_destroy_only< RowIterator > source, Item *condition)
Definition: composite_iterators.h:84
void StartPSIBatchMode() override
Start performance schema batch mode, if supported (otherwise ignored).
Definition: composite_iterators.h:92
Item * m_condition
Definition: composite_iterators.h:103
void SetNullRowFlag(bool is_null_row) override
Mark the current row buffer as containing a NULL row or not, so that if you read from it and the flag...
Definition: composite_iterators.h:88
bool DoInit() override
Definition: composite_iterators.cc:97
FollowTailIterator is a special version of TableScanIterator that is used as part of WITH RECURSIVE q...
Definition: basic_row_iterators.h:476
Base class that is used to represent any kind of expression in a relational query.
Definition: item.h:929
Definition: sql_optimizer.h:133
Definition: key.h:113
Handles LIMIT and/or OFFSET; Init() eats the first "offset" rows, and Read() stops as soon as it's se...
Definition: composite_iterators.h:111
void UnlockRow() override
Definition: composite_iterators.h:150
LimitOffsetIterator(THD *thd, unique_ptr_destroy_only< RowIterator > source, ha_rows limit, ha_rows offset, bool count_all_rows, bool reject_multiple_rows, ha_rows *skipped_rows)
Definition: composite_iterators.h:127
ha_rows m_seen_rows
Definition: composite_iterators.h:161
const bool m_count_all_rows
Definition: composite_iterators.h:169
ha_rows * m_skipped_rows
Definition: composite_iterators.h:171
int DoRead() override
Definition: composite_iterators.cc:161
void EndPSIBatchModeIfStarted() override
Ends performance schema batch mode, if started.
Definition: composite_iterators.h:147
const ha_rows m_limit
Definition: composite_iterators.h:168
const ha_rows m_offset
Definition: composite_iterators.h:168
bool m_needs_offset
Whether we have OFFSET rows that we still need to skip.
Definition: composite_iterators.h:166
void SetNullRowFlag(bool is_null_row) override
Mark the current row buffer as containing a NULL row or not, so that if you read from it and the flag...
Definition: composite_iterators.h:142
unique_ptr_destroy_only< RowIterator > m_source
Definition: composite_iterators.h:156
const bool m_reject_multiple_rows
Definition: composite_iterators.h:170
bool DoInit() override
Definition: composite_iterators.cc:147
void StartPSIBatchMode() override
Start performance schema batch mode, if supported (otherwise ignored).
Definition: composite_iterators.h:146
MaterializeInformationSchemaTableIterator makes sure a given I_S temporary table is materialized (fil...
Definition: composite_iterators.h:831
Item * m_condition
Definition: composite_iterators.h:857
void UnlockRow() override
Definition: composite_iterators.h:848
int DoRead() override
Definition: composite_iterators.h:852
MaterializeInformationSchemaTableIterator(THD *thd, unique_ptr_destroy_only< RowIterator > table_iterator, Table_ref *table_list, Item *condition)
Definition: composite_iterators.cc:4501
Table_ref * m_table_list
Definition: composite_iterators.h:856
bool DoInit() override
Definition: composite_iterators.cc:4509
void SetNullRowFlag(bool is_null_row) override
Mark the current row buffer as containing a NULL row or not, so that if you read from it and the flag...
Definition: composite_iterators.h:837
void EndPSIBatchModeIfStarted() override
Ends performance schema batch mode, if started.
Definition: composite_iterators.h:842
void StartPSIBatchMode() override
Start performance schema batch mode, if supported (otherwise ignored).
Definition: composite_iterators.h:841
unique_ptr_destroy_only< RowIterator > m_table_iterator
The iterator that reads from the materialized table.
Definition: composite_iterators.h:855
An iterator that wraps a Table_function (e.g.
Definition: composite_iterators.h:622
Table_function * m_table_function
Definition: composite_iterators.h:647
unique_ptr_destroy_only< RowIterator > m_table_iterator
Definition: composite_iterators.h:645
int DoRead() override
Definition: composite_iterators.h:643
bool DoInit() override
Definition: composite_iterators.cc:4262
void StartPSIBatchMode() override
Start performance schema batch mode, if supported (otherwise ignored).
Definition: composite_iterators.h:632
void UnlockRow() override
Definition: composite_iterators.h:639
void SetNullRowFlag(bool is_null_row) override
Mark the current row buffer as containing a NULL row or not, so that if you read from it and the flag...
Definition: composite_iterators.h:628
void EndPSIBatchModeIfStarted() override
Ends performance schema batch mode, if started.
Definition: composite_iterators.h:633
MaterializedTableFunctionIterator(THD *thd, Table_function *table_function, TABLE *table, unique_ptr_destroy_only< RowIterator > table_iterator)
Definition: composite_iterators.cc:4255
A typesafe replacement for DYNAMIC_ARRAY.
Definition: mem_root_array.h:432
A simple nested loop join, taking in two iterators (left/outer and right/inner) and joining them toge...
Definition: composite_iterators.h:332
int DoRead() override
Definition: composite_iterators.cc:526
void UnlockRow() override
Definition: composite_iterators.h:364
@ END_OF_ROWS
Definition: composite_iterators.h:381
@ READING_INNER_ROWS
Definition: composite_iterators.h:380
@ NEEDS_OUTER_ROW
Definition: composite_iterators.h:378
@ READING_FIRST_INNER_ROW
Definition: composite_iterators.h:379
const bool m_pfs_batch_mode
Whether to use batch mode when scanning the inner iterator.
Definition: composite_iterators.h:389
NestedLoopIterator(THD *thd, unique_ptr_destroy_only< RowIterator > source_outer, unique_ptr_destroy_only< RowIterator > source_inner, JoinType join_type, bool pfs_batch_mode)
Definition: composite_iterators.h:334
unique_ptr_destroy_only< RowIterator > const m_source_inner
Definition: composite_iterators.h:385
void EndPSIBatchModeIfStarted() override
Ends performance schema batch mode, if started.
Definition: composite_iterators.h:359
const JoinType m_join_type
Definition: composite_iterators.h:386
bool DoInit() override
Definition: composite_iterators.cc:515
void SetNullRowFlag(bool is_null_row) override
Mark the current row buffer as containing a NULL row or not, so that if you read from it and the flag...
Definition: composite_iterators.h:353
unique_ptr_destroy_only< RowIterator > const m_source_outer
Definition: composite_iterators.h:384
enum NestedLoopIterator::@66 m_state
An iterator that is semantically equivalent to a semijoin NestedLoopIterator immediately followed by ...
Definition: composite_iterators.h:791
void UnlockRow() override
Definition: composite_iterators.h:808
int DoRead() override
Definition: composite_iterators.cc:4444
KEY * m_key
Definition: composite_iterators.h:821
unique_ptr_destroy_only< RowIterator > const m_source_outer
Definition: composite_iterators.h:817
void SetNullRowFlag(bool is_null_row) override
Mark the current row buffer as containing a NULL row or not, so that if you read from it and the flag...
Definition: composite_iterators.h:798
const size_t m_key_len
Definition: composite_iterators.h:823
bool m_deduplicate_against_previous_row
Definition: composite_iterators.h:824
bool DoInit() override
Definition: composite_iterators.cc:4436
uchar * m_key_buf
Definition: composite_iterators.h:822
unique_ptr_destroy_only< RowIterator > const m_source_inner
Definition: composite_iterators.h:818
NestedLoopSemiJoinWithDuplicateRemovalIterator(THD *thd, unique_ptr_destroy_only< RowIterator > source_outer, unique_ptr_destroy_only< RowIterator > source_inner, const TABLE *table, KEY *key, size_t key_len)
Definition: composite_iterators.cc:4421
void EndPSIBatchModeIfStarted() override
Ends performance schema batch mode, if started.
Definition: composite_iterators.h:803
const TABLE * m_table_outer
Definition: composite_iterators.h:820
An iterator that removes consecutive rows that are the same according to a set of items (typically th...
Definition: composite_iterators.h:701
void StartPSIBatchMode() override
Start performance schema batch mode, if supported (otherwise ignored).
Definition: composite_iterators.h:711
RemoveDuplicatesIterator(THD *thd, unique_ptr_destroy_only< RowIterator > source, JOIN *join, std::span< Item * > group_items)
Definition: composite_iterators.cc:4336
void EndPSIBatchModeIfStarted() override
Ends performance schema batch mode, if started.
Definition: composite_iterators.h:712
void UnlockRow() override
Definition: composite_iterators.h:715
void SetNullRowFlag(bool is_null_row) override
Mark the current row buffer as containing a NULL row or not, so that if you read from it and the flag...
Definition: composite_iterators.h:707
bool m_first_row
Definition: composite_iterators.h:722
bool DoInit() override
Definition: composite_iterators.cc:4348
Bounds_checked_array< Cached_item * > m_caches
Definition: composite_iterators.h:721
int DoRead() override
Definition: composite_iterators.cc:4353
unique_ptr_destroy_only< RowIterator > m_source
Definition: composite_iterators.h:720
Much like RemoveDuplicatesIterator, but works on the basis of a given index (or more accurately,...
Definition: composite_iterators.h:731
int DoRead() override
Definition: composite_iterators.cc:4395
void UnlockRow() override
Definition: composite_iterators.h:745
uchar * m_key_buf
Definition: composite_iterators.h:754
bool m_first_row
Definition: composite_iterators.h:756
const TABLE * m_table
Definition: composite_iterators.h:752
void StartPSIBatchMode() override
Start performance schema batch mode, if supported (otherwise ignored).
Definition: composite_iterators.h:741
void SetNullRowFlag(bool is_null_row) override
Mark the current row buffer as containing a NULL row or not, so that if you read from it and the flag...
Definition: composite_iterators.h:737
const size_t m_key_len
Definition: composite_iterators.h:755
void EndPSIBatchModeIfStarted() override
Ends performance schema batch mode, if started.
Definition: composite_iterators.h:742
RemoveDuplicatesOnIndexIterator(THD *thd, unique_ptr_destroy_only< RowIterator > source, const TABLE *table, KEY *key, size_t key_len)
Definition: composite_iterators.cc:4380
KEY * m_key
Definition: composite_iterators.h:753
unique_ptr_destroy_only< RowIterator > m_source
Definition: composite_iterators.h:751
bool DoInit() override
Definition: composite_iterators.cc:4390
A context for reading through a single table using a chosen access method: index read,...
Definition: row_iterator.h:82
THD * thd() const
Definition: row_iterator.h:255
Definition: sql_executor.h:95
StreamingIterator is a minimal version of MaterializeIterator that does not actually materialize; ins...
Definition: composite_iterators.h:569
bool DoInit() override
Definition: composite_iterators.cc:3821
JOIN *const m_join
Definition: composite_iterators.h:602
void UnlockRow() override
Definition: composite_iterators.h:594
int DoRead() override
Definition: composite_iterators.cc:3844
void StartPSIBatchMode() override
Start performance schema batch mode, if supported (otherwise ignored).
Definition: composite_iterators.h:588
StreamingIterator(THD *thd, unique_ptr_destroy_only< RowIterator > subquery_iterator, Temp_table_param *temp_table_param, TABLE *table, bool provide_rowid, JOIN *join, int ref_slice)
Definition: composite_iterators.cc:3792
Temp_table_param * m_temp_table_param
Definition: composite_iterators.h:600
const bool m_provide_rowid
Definition: composite_iterators.h:609
const int m_output_slice
Definition: composite_iterators.h:603
void EndPSIBatchModeIfStarted() override
Ends performance schema batch mode, if started.
Definition: composite_iterators.h:591
int m_input_slice
Definition: composite_iterators.h:604
unique_ptr_destroy_only< RowIterator > m_subquery_iterator
Definition: composite_iterators.h:599
ha_rows m_row_number
Definition: composite_iterators.h:601
Using this class is fraught with peril, and you need to be very careful when doing so.
Definition: sql_string.h:169
For each client connection we create a separate thread with THD serving as a thread/connection descri...
Definition: sql_lexer_thd.h:36
Definition: row_iterator.h:267
TABLE * table() const
Definition: row_iterator.h:279
Class representing a table function.
Definition: table_function.h:53
Definition: table.h:2958
Object containing parameters used when creating and using temporary tables.
Definition: temp_table_param.h:97
Like semijoin materialization, weedout works on the basic idea that a semijoin is just like an inner ...
Definition: composite_iterators.h:669
bool DoInit() override
Definition: composite_iterators.cc:4288
unique_ptr_destroy_only< RowIterator > m_source
Definition: composite_iterators.h:686
void EndPSIBatchModeIfStarted() override
Ends performance schema batch mode, if started.
Definition: composite_iterators.h:678
WeedoutIterator(THD *thd, unique_ptr_destroy_only< RowIterator > source, SJ_TMP_TABLE *sj, table_map tables_to_get_rowid_for)
Definition: composite_iterators.cc:4275
const table_map m_tables_to_get_rowid_for
Definition: composite_iterators.h:688
int DoRead() override
Definition: composite_iterators.cc:4305
SJ_TMP_TABLE * m_sj
Definition: composite_iterators.h:687
void UnlockRow() override
Definition: composite_iterators.h:681
void SetNullRowFlag(bool is_null_row) override
Mark the current row buffer as containing a NULL row or not, so that if you read from it and the flag...
Definition: composite_iterators.h:674
A structure that contains a list of input tables for a hash join operation, BKA join operation or a s...
Definition: pack_rows.h:84
JoinType
Definition: join_type.h:28
@ ANTI
Left antijoin, i.e.
@ SEMI
Left semijoin, i.e.
This file follows Google coding style, except for the name MEM_ROOT (which is kept for historical rea...
std::unique_ptr< T, Destroy_only< T > > unique_ptr_destroy_only
std::unique_ptr, but only destroying.
Definition: my_alloc.h:480
This file includes constants used by all storage engines.
my_off_t ha_rows
Definition: my_base.h:1228
Some integer typedefs for easier portability.
unsigned long long int ulonglong
Definition: my_inttypes.h:56
unsigned char uchar
Definition: my_inttypes.h:52
uint64_t table_map
Definition: my_table_map.h:30
static PFS_engine_table_share_proxy table
Definition: pfs.cc:61
Definition: composite_iterators.h:437
RowIterator * CreateIterator(THD *thd, Mem_root_array< materialize_iterator::Operand > operands, const MaterializePathParameters *path_params, unique_ptr_destroy_only< RowIterator > table_iterator, JOIN *join)
Create an iterator that materializes a set of row into a temporary table and sets up a (pre-existing)...
Definition: composite_iterators.cc:3769
std::string join(const detail::range auto &rng, std::string_view delim)
join elements of a range into a string separated by a delimiter.
Definition: string.h:74
Define std::hash<Gtid>.
Definition: gtid.h:355
Definition: composite_iterators.h:531
RowIterator * CreateIterator(THD *thd, unique_ptr_destroy_only< RowIterator > subquery_iterator, Temp_table_param *temp_table_param, TABLE *table, unique_ptr_destroy_only< RowIterator > table_iterator, JOIN *join, int ref_slice)
Create an iterator that aggregates the output rows from another iterator into a temporary table and t...
Definition: composite_iterators.cc:4227
std::vector< T, ut::allocator< T > > vector
Specialization of vector which uses allocator.
Definition: ut0new.h:2880
Generic routines for packing rows (possibly from multiple tables at the same time) into strings,...
required string key
Definition: replication_asynchronous_connection_failover.proto:60
repeated Source source
Definition: replication_asynchronous_connection_failover.proto:42
join_type
Definition: sql_opt_exec_shared.h:184
Our own string classes, used pervasively throughout the executor.
Definition: materialize_path_parameters.h:40
Definition: table.h:1456
An operand (query block) to be materialized by MaterializeIterator.
Definition: composite_iterators.h:442
unique_ptr_destroy_only< RowIterator > subquery_iterator
The iterator to read the actual rows from.
Definition: composite_iterators.h:444
bool copy_items
If set to false, the Field objects in the output row are presumed already to be filled out.
Definition: composite_iterators.h:472
Temp_table_param * temp_table_param
If copy_items is true, used for copying the Field objects into the temporary table row.
Definition: composite_iterators.h:488
double m_estimated_output_rows
The estimated number of rows produced by this block.
Definition: composite_iterators.h:502
ulonglong m_operand_idx
The current operand (i.e.
Definition: composite_iterators.h:479
bool is_recursive_reference
Definition: composite_iterators.h:492
FollowTailIterator * recursive_reader
Definition: composite_iterators.h:499
int select_number
Used only for optimizer trace.
Definition: composite_iterators.h:447
ulonglong m_total_operands
The number of operands (i.e.
Definition: composite_iterators.h:476
uint m_first_distinct
Used for EXCEPT computation: the index of the first operand involved in a N-ary except operation whic...
Definition: composite_iterators.h:484
bool disable_deduplication_by_hash_field
If true, de-duplication checking via hash key is disabled when materializing this query block (ie....
Definition: composite_iterators.h:467
JOIN * join
The JOIN that this query block represents.
Definition: composite_iterators.h:454