1#ifndef SQL_ITERATORS_COMPOSITE_ITERATORS_H_
2#define SQL_ITERATORS_COMPOSITE_ITERATORS_H_
3
4/* Copyright (c) 2018, 2024, Oracle and/or its affiliates.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License, version 2.0,
8 as published by the Free Software Foundation.
9
10 This program is designed to work with certain software (including
11 but not limited to OpenSSL) that is licensed under separate terms,
12 as designated in a particular file or component or in included license
13 documentation. The authors of MySQL hereby grant you an additional
14 permission to link the program and your derivative works with the
15 separately licensed software that they have either included with
16 the program or referenced in the documentation.
17
18 This program is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License, version 2.0, for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
26
27/**
28 @file composite_iterators.h
29
30 A composite row iterator is one that takes in one or more existing iterators
31 and processes their rows in some interesting way. They are usually not bound
32 to a single table or similar, but are the inner (non-leaf) nodes of the
33 iterator execution tree. They consistently own their source iterator, although
34 not its memory (since we never allocate row iterators on the heap--usually on
35 a MEM_ROOT). This means that you will end up with a single root
36 iterator which then owns everything else recursively.
37
38 SortingIterator and the two window iterators are also composite iterators,
39 but are defined in their own files.
40 */
41
42#include <assert.h>
43#include <stddef.h>
44#include <stdint.h>
45#include <sys/types.h>
46#include <memory>
47#include <string>
48#include <utility>
49#include <vector>
50
51#include "my_alloc.h"
52#include "my_base.h"
53#include "my_inttypes.h"
54#include "my_table_map.h"
55#include "sql/iterators/row_iterator.h"
56#include "sql/join_type.h"
57#include "sql/mem_root_array.h"
58#include "sql/pack_rows.h"
59#include "sql/sql_array.h"
60#include "sql_string.h"
61
62class Cached_item;
63class FollowTailIterator;
64class Item;
65class JOIN;
66class KEY;
67struct MaterializePathParameters;
68class SJ_TMP_TABLE;
69class Table_ref;
70class THD;
71class Table_function;
72class Temp_table_param;
73struct TABLE;
74
75/**
76 An iterator that takes in a stream of rows and passes through only those that
77 meet some criteria (i.e., a condition evaluates to true). This is typically
78 used for WHERE/HAVING.
79 */
80class FilterIterator final : public RowIterator {
81 public:
82 FilterIterator(THD *thd, unique_ptr_destroy_only<RowIterator> source,
83 Item *condition)
84 : RowIterator(thd), m_source(std::move(source)), m_condition(condition) {}
85
86 bool Init() override { return m_source->Init(); }
87
88 int Read() override;
89
90 void SetNullRowFlag(bool is_null_row) override {
91 m_source->SetNullRowFlag(is_null_row);
92 }
93
94 void StartPSIBatchMode() override { m_source->StartPSIBatchMode(); }
95 void EndPSIBatchModeIfStarted() override {
96 m_source->EndPSIBatchModeIfStarted();
97 }
98 void UnlockRow() override { m_source->UnlockRow(); }
99
100 private:
101 unique_ptr_destroy_only<RowIterator> m_source;
102 Item *m_condition;
103};
104
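/*
  A minimal sketch of how a composite iterator such as FilterIterator is
  typically driven (illustrative only; assumes "source" is an existing
  MEM_ROOT-allocated iterator and "cond" is a resolved Item):

    FilterIterator it(thd, std::move(source), cond);
    if (it.Init()) return true;     // Initialization failed.
    for (;;) {
      const int err = it.Read();    // 0 = row available, -1 = EOF, 1 = error.
      if (err != 0) break;
      // A row accepted by "cond" now sits in the source tables' record
      // buffers, ready to be consumed.
    }
*/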
105/**
106 Handles LIMIT and/or OFFSET; Init() eats the first "offset" rows, and Read()
107 stops as soon as it's seen "limit" rows (including any skipped by offset).
108 */
109class LimitOffsetIterator final : public RowIterator {
110 public:
111 /**
112 @param thd Thread context
113 @param source Row source
114 @param limit Maximum number of rows to read, including the ones skipped by
115 offset. Can be HA_POS_ERROR for no limit.
116 @param offset Number of initial rows to skip. Can be 0 for no offset.
117 @param count_all_rows If true, the query will run to completion to get
118 more accurate numbers for skipped_rows, so you will not get any
119 performance benefit from ending early.
120 @param reject_multiple_rows True if a derived table transformed from a
121 scalar subquery needs a run-time cardinality check
122 @param skipped_rows If not nullptr, is incremented for each row skipped by
123 offset or limit.
124 */
125 LimitOffsetIterator(THD *thd, unique_ptr_destroy_only<RowIterator> source,
126 ha_rows limit, ha_rows offset, bool count_all_rows,
127 bool reject_multiple_rows, ha_rows *skipped_rows)
128 : RowIterator(thd),
129 m_source(std::move(source)),
130 m_limit(limit),
131 m_offset(offset),
132 m_count_all_rows(count_all_rows),
133 m_reject_multiple_rows(reject_multiple_rows),
134 m_skipped_rows(skipped_rows) {
135 if (count_all_rows) {
136 assert(m_skipped_rows != nullptr);
137 }
138 }
139
140 bool Init() override;
141
142 int Read() override;
143
144 void SetNullRowFlag(bool is_null_row) override {
145 m_source->SetNullRowFlag(is_null_row);
146 }
147
148 void StartPSIBatchMode() override { m_source->StartPSIBatchMode(); }
149 void EndPSIBatchModeIfStarted() override {
150 m_source->EndPSIBatchModeIfStarted();
151 }
152 void UnlockRow() override { m_source->UnlockRow(); }
153
154 private:
155 unique_ptr_destroy_only<RowIterator> m_source;
156
157 // Note: The number of seen rows starts off at m_limit if we have OFFSET,
158 // which means we don't need separate LIMIT and OFFSET tests on the
159 // fast path of Read().
160 ha_rows m_seen_rows;
161
162 /**
163 Whether we have OFFSET rows that we still need to skip.
164 */
165 bool m_needs_offset;
166
167 const ha_rows m_limit, m_offset;
168 const bool m_count_all_rows;
169 const bool m_reject_multiple_rows;
170 ha_rows *m_skipped_rows;
171};
172
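/*
  Construction sketch for "LIMIT 10 OFFSET 5" (illustrative; note that the
  limit argument counts the rows skipped by the offset as well, so it is
  offset + limit = 15 here):

    LimitOffsetIterator it(thd, std::move(source), /*limit=*/15, /*offset=*/5,
                           /*count_all_rows=*/false,
                           /*reject_multiple_rows=*/false,
                           /*skipped_rows=*/nullptr);
*/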
173/**
174 Handles aggregation (typically used for GROUP BY) for the case where the rows
175 are already properly grouped coming in, i.e., all rows that are supposed to be
176 part of the same group are adjacent in the input stream. (This could be
177 because they were sorted earlier, because we are scanning an index that
178 already gives us the rows in a group-compatible order, or because there is no
179 grouping.)
180
181 AggregateIterator needs to be able to save and restore rows; it doesn't know
182 when a group ends until it's seen the first row that is part of the _next_
183 group. When that happens, it needs to tuck away that next row, and then
184 restore the previous row so that the output row gets the correct grouped
185 values. A simple example, doing SELECT a, SUM(b) FROM t1 GROUP BY a:
186
187 t1.a t1.b SUM(b)
188 1 1 <-- first row, save it 1
189 1 2 3
190 1 3 6
191 2 1 <-- group changed, save row
192 [1 1] <-- restore first row, output 6
193 reset aggregate --> 0
194 [2 1] <-- restore new row, process it 1
195 2 10 11
196 <-- EOF, output 11
197
198 To save and restore rows like this, it uses the infrastructure from
199 pack_rows.h to pack and unpack all relevant rows into record[0] of every input
200 table. (Currently, there can only be one input table, but this may very well
201 change in the future.) It would be nice to have a more abstract concept of
202 sending a row around and taking copies of it if needed, as opposed to it
203 implicitly staying in the table's buffer. (This would also solve some
204 issues in EQRefIterator and when synthesizing NULL rows for outer joins.)
205 However, that's a large refactoring.
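
  As a sketch of the save/restore step (illustrative; this mirrors the
  pack_rows.h helpers the iterator relies on, not an excerpt of its code):

  @code
    // A row from the *next* group was just read: stash it away ...
    StoreFromTableBuffers(m_tables, &m_first_row_next_group);
    // ... and bring back the first row of the finished group, so that the
    // output row is built from that group's column values.
    LoadIntoTableBuffers(
        m_tables, pointer_cast<const uchar *>(m_first_row_this_group.ptr()));
  @endcode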
206 */
207class AggregateIterator final : public RowIterator {
208 public:
209 AggregateIterator(THD *thd, unique_ptr_destroy_only<RowIterator> source,
210 JOIN *join, pack_rows::TableCollection tables, bool rollup);
211
212 bool Init() override;
213 int Read() override;
214 void SetNullRowFlag(bool is_null_row) override {
215 m_source->SetNullRowFlag(is_null_row);
216 }
217
218 void StartPSIBatchMode() override { m_source->StartPSIBatchMode(); }
219 void EndPSIBatchModeIfStarted() override {
220 m_source->EndPSIBatchModeIfStarted();
221 }
222 void UnlockRow() override {
223 // Most likely, HAVING failed. Ideally, we'd like to backtrack and
224 // unlock all rows that went into this aggregate, but we can't do that,
225 // and we also can't unlock the _current_ row, since that belongs to a
226 // different group. Thus, do nothing.
227 }
228
229 private:
230 enum {
231 READING_FIRST_ROW,
232 LAST_ROW_STARTED_NEW_GROUP,
233 OUTPUTTING_ROLLUP_ROWS,
234 DONE_OUTPUTTING_ROWS
235 } m_state;
236
237 unique_ptr_destroy_only<RowIterator> m_source;
238
239 /**
240 The join we are part of. It would be nicer not to rely on this,
241 but we need a large number of members from there, like which
242 aggregate functions we have, the THD, temporary table parameters
243 and so on.
244 */
245 JOIN *m_join = nullptr;
246
247 /// Whether we have seen the last input row.
248 bool m_seen_eof;
249
250 /**
251 Used to save NULL information in the specific case where we have
252 zero input rows.
253 */
254 table_map m_save_nullinfo;
255
256 /// Whether this is a rollup query.
257 const bool m_rollup;
258
259 /**
260 For rollup: The index of the first group item that did _not_ change when we
261 last switched groups. E.g., if we have group fields A,B,C,D and then switch
262 to group A,B,E,D, this value will become 1 (which means that we need
263 to output rollup rows for 2 -- A,B,E,NULL -- and then 1 -- A,B,NULL,NULL).
264 m_current_rollup_position will count down from the end until it becomes
265 less than this value.
266
267 If we do not have rollup, this value is perennially zero.
268 */
269 int m_last_unchanged_group_item_idx;
270
271 /**
272 If we are in state OUTPUTTING_ROLLUP_ROWS, where we are in the iteration.
273 This value will start at the index of the last group expression and then
274 count backwards down to and including m_last_unchanged_group_item_idx.
275 It is used to communicate to the rollup group items whether to turn
276 themselves into NULLs, and the sum items which of their sums to output.
277 */
278 int m_current_rollup_position;
279
280 /**
281 The list of tables we are reading from; they are the ones for which we need
282 to save and restore rows.
283 */
284 pack_rows::TableCollection m_tables;
285
286 /// Packed version of the first row in the group we are currently processing.
287 String m_first_row_this_group;
288
289 /**
290 If applicable, packed version of the first row in the _next_ group. This is
291 used only in the LAST_ROW_STARTED_NEW_GROUP state; we just saw a row that
292 didn't belong to the current group, so we saved it here and went to output
293 a group. On the next Read() call, we need to process this deferred row
294 first of all.
295
296 Even when not in use, this string contains a buffer that is large enough to
297 pack a full row into, sans blobs. (If blobs are present,
298 StoreFromTableBuffers() will automatically allocate more space if needed.)
299 */
300 String m_first_row_next_group;
301
302 /**
303 The slice we're setting when returning rows. See the comment in the
304 constructor.
305 */
306 int m_output_slice;
307
308 void SetRollupLevel(int level);
309};
310
311/**
312 A simple nested loop join, taking in two iterators (left/outer and
313 right/inner) and joining them together. This may, of course, scan the inner
314 iterator many times. It is currently the only form of join we have.
315
316 The iterator works as a state machine, where the state records whether we need
317 to read a new outer row or not, and whether we've seen any rows from the inner
318 iterator at all (if not, an outer join needs to synthesize a new NULL row).
319
320 The iterator takes care of activating performance schema batch mode on the
321 right iterator if needed; this is typically only used if it is the innermost
322 table in the entire join (where the gains from turning on batch mode are the
323 largest, and the accuracy loss from turning it off is the least critical).
324 */
325class NestedLoopIterator final : public RowIterator {
326 public:
327 NestedLoopIterator(THD *thd,
328 unique_ptr_destroy_only<RowIterator> source_outer,
329 unique_ptr_destroy_only<RowIterator> source_inner,
330 JoinType join_type, bool pfs_batch_mode)
331 : RowIterator(thd),
332 m_source_outer(std::move(source_outer)),
333 m_source_inner(std::move(source_inner)),
334 m_join_type(join_type),
335 m_pfs_batch_mode(pfs_batch_mode) {
336 assert(m_source_outer != nullptr);
337 assert(m_source_inner != nullptr);
338
339 // Batch mode makes no sense for anti- or semijoins, since they should only
340 // be reading one row.
341 if (join_type == JoinType::ANTI || join_type == JoinType::SEMI) {
342 assert(!pfs_batch_mode);
343 }
344 }
345
346 bool Init() override;
347
348 int Read() override;
349
350 void SetNullRowFlag(bool is_null_row) override {
351 // TODO: write something here about why we can't do this lazily.
352 m_source_outer->SetNullRowFlag(is_null_row);
353 m_source_inner->SetNullRowFlag(is_null_row);
354 }
355
356 void EndPSIBatchModeIfStarted() override {
357 m_source_outer->EndPSIBatchModeIfStarted();
358 m_source_inner->EndPSIBatchModeIfStarted();
359 }
360
361 void UnlockRow() override {
362 // Since we don't know which condition caused the row to be rejected,
363 // we can't know whether we could also unlock the outer row
364 // (it may still be used as part of other joined rows).
365 if (m_state == READING_FIRST_INNER_ROW || m_state == READING_INNER_ROWS) {
366 m_source_inner->UnlockRow();
367 }
368 }
369
370 private:
371 enum {
372 NEEDS_OUTER_ROW,
373 READING_FIRST_INNER_ROW,
374 READING_INNER_ROWS,
375 END_OF_ROWS
376 } m_state;
377
378 unique_ptr_destroy_only<RowIterator> const m_source_outer;
379 unique_ptr_destroy_only<RowIterator> const m_source_inner;
380 const JoinType m_join_type;
381
382 /** Whether to use batch mode when scanning the inner iterator. */
383 const bool m_pfs_batch_mode;
384};
385
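/*
  Construction sketch (illustrative): an inner join of two existing iterators,
  with performance schema batch mode left off on the inner side:

    NestedLoopIterator join_it(thd, std::move(outer), std::move(inner),
                               JoinType::INNER, /*pfs_batch_mode=*/false);
*/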
386/**
387 An iterator that helps invalidate caches. Every time a row passes through it
388 or it changes state in any other way, it increments its “generation” counter.
389 This allows MaterializeIterator to see whether any of its dependencies has
390 changed, and then force a rematerialization -- this is typically used for
391 LATERAL tables, where we're joining in a derived table that depends on
392 something earlier in the join.
393 */
394class CacheInvalidatorIterator final : public RowIterator {
395 public:
396 CacheInvalidatorIterator(THD *thd,
397 unique_ptr_destroy_only<RowIterator> source_iterator,
398 const std::string &name)
399 : RowIterator(thd),
400 m_source_iterator(std::move(source_iterator)),
401 m_name(name) {}
402
403 bool Init() override {
404 ++m_generation;
405 return m_source_iterator->Init();
406 }
407
408 int Read() override {
409 ++m_generation;
410 return m_source_iterator->Read();
411 }
412
413 void SetNullRowFlag(bool is_null_row) override {
414 ++m_generation;
415 m_source_iterator->SetNullRowFlag(is_null_row);
416 }
417
418 void UnlockRow() override { m_source_iterator->UnlockRow(); }
419
420 int64_t generation() const { return m_generation; }
421 std::string name() const { return m_name; }
422
423 private:
424 unique_ptr_destroy_only<RowIterator> m_source_iterator;
425 int64_t m_generation = 0;
426 std::string m_name;
427};
428
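/*
  Usage sketch (illustrative): a consumer that caches rows can compare the
  validator's generation against the one it saw when it last (re)filled its
  cache, and rematerialize only when the generation has moved on:

    if (validator->generation() != last_seen_generation) {
      // A dependency changed since we cached; rebuild the cache.
      last_seen_generation = validator->generation();
    }
*/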
429namespace materialize_iterator {
430/**
431 An operand (query block) to be materialized by MaterializeIterator.
432 (@see MaterializeIterator for details.)
433*/
434struct Operand {
435 /// The iterator to read the actual rows from.
436 unique_ptr_destroy_only<RowIterator> subquery_iterator;
437
438 /// Used only for optimizer trace.
439 int select_number;
440
441 /// The JOIN that this query block represents. Used for performance
442 /// schema batch mode: When materializing a query block that consists of
443 /// a single table, MaterializeIterator needs to set up schema batch mode,
444 /// since there is no nested loop iterator to do it. (This is similar to
445 /// what ExecuteIteratorQuery() needs to do at the top level.)
446 JOIN *join;
447
448 /// If true, de-duplication checking via hash key is disabled
449 /// when materializing this query block (i.e., we simply avoid calling
450 /// check_unique_fields() for each row). Used when materializing
451 /// UNION DISTINCT and UNION ALL parts into the same table.
452 /// We'd like to just use a unique constraint via unique index instead,
453 /// but there might be other indexes on the destination table
454 /// that we'd like to keep, and the implementation doesn't allow
455 /// disabling only one index.
456 ///
457 /// If you use this on a query block, doing_hash_deduplication()
458 /// must be true.
459 bool disable_deduplication_by_hash_field;
460
461 /// If set to false, the Field objects in the output row are
462 /// presumed already to be filled out. This is the case iff
463 /// there's a windowing iterator earlier in the chain.
464 bool copy_items;
465
466 /// The number of operands (i.e. blocks) involved in the set operation:
467 /// used for INTERSECT to determine if a value is present in all operands
468 ulonglong m_total_operands;
469 /// The current operand (i.e. block) number, starting at zero. We use this
470 /// for INTERSECT and EXCEPT materialization operand.
471 ulonglong m_operand_idx;
472 /// Used for EXCEPT computation: the index of the first operand involved in
473 /// an N-ary except operation which has DISTINCT. This is significant for
474 /// calculating whether to set the counter to zero or just decrement it
475 /// when we see a right side operand.
476 uint m_first_distinct;
477
478 /// If copy_items is true, used for copying the Field objects
479 /// into the temporary table row. Otherwise unused.
480 Temp_table_param *temp_table_param;
481
482 // Whether this query block is a recursive reference back to the
483 // output of the materialization.
484 bool is_recursive_reference;
485
486 // If is_recursive_reference is true, contains the FollowTailIterator
487 // in the query block (there can be at most one recursive reference
488 // in a join list, as per the SQL standard, so there should be exactly one).
489 // Used for informing the iterators about various shared state in the
490 // materialization (including coordinating rematerializations).
491 FollowTailIterator *recursive_reader;
492
493 /// The estimated number of rows produced by this block
494 double m_estimated_output_rows;
495};
496
497/**
498 Create an iterator that materializes a set of rows into a temporary table
499 and sets up a (pre-existing) iterator to access that.
500 @see MaterializeIterator.
501
502 @param thd Thread handler.
503 @param operands List of operands (query blocks) to materialize.
504 @param path_params MaterializePath settings.
505 @param table_iterator Iterator used for accessing the temporary table
506 after materialization.
507 @param join
508 When materializing within the same JOIN (e.g., into a temporary table
509 before sorting), as opposed to a derived table or a CTE, we may need
510 to change the slice on the join before returning rows from the result
511 table. If so, join and ref_slice would need to be set, and
512 operands should contain only one member, with the same
513 join.
514 @return the iterator.
515*/
516RowIterator *CreateIterator(
517 THD *thd, Mem_root_array<materialize_iterator::Operand> operands,
518 const MaterializePathParameters *path_params,
519 unique_ptr_destroy_only<RowIterator> table_iterator, JOIN *join);
520
521} // namespace materialize_iterator
522
523namespace temptable_aggregate_iterator {
524/**
525 Create an iterator that aggregates the output rows from another iterator
526 into a temporary table and then sets up a (pre-existing) iterator to
527 access the temporary table.
528 @see TemptableAggregateIterator.
529
530 @param thd Thread handler.
531 @param subquery_iterator input to aggregation.
532 @param temp_table_param temporary table settings.
533 @param table_iterator Iterator used for scanning the temporary table
534 after materialization.
535 @param table the temporary table.
536 @param join the JOIN in which we aggregate.
537 @param ref_slice the slice to set when accessing temporary table;
538 used if anything upstream wants to evaluate values based on its contents.
539 @return the iterator.
540*/
541RowIterator *CreateIterator(
542 THD *thd, unique_ptr_destroy_only<RowIterator> subquery_iterator,
543 Temp_table_param *temp_table_param, TABLE *table,
544 unique_ptr_destroy_only<RowIterator> table_iterator, JOIN *join,
545 int ref_slice);
546
547} // namespace temptable_aggregate_iterator
548
549/**
550 StreamingIterator is a minimal version of MaterializeIterator that does not
551 actually materialize; instead, every Read() just forwards the call to the
552 subquery iterator and does the required copying from one set of fields to
553 another.
554
555 It is used when the optimizer would normally set up a materialization,
556 but you don't actually need one, i.e., you don't want to read the rows multiple
557 times after writing them, and you don't want to access them by index (only
558 a single table scan). It also takes care of setting the NULL row flag
559 on the temporary table.
560 */
561class StreamingIterator final : public TableRowIterator {
562 public:
563 /**
564 @param thd Thread handle.
565 @param subquery_iterator The iterator to read rows from.
566 @param temp_table_param Parameters for the temp table.
567 @param table The table we are streaming through. Will never actually
568 be written to, but its fields will be used.
569 @param provide_rowid If true, generate a row ID for each row we stream.
570 This is used if the parent needs row IDs for deduplication, in particular
571 weedout.
572 @param join See MaterializeIterator.
573 @param ref_slice See MaterializeIterator.
574 */
575 StreamingIterator(THD *thd,
576 unique_ptr_destroy_only<RowIterator> subquery_iterator,
577 Temp_table_param *temp_table_param, TABLE *table,
578 bool provide_rowid, JOIN *join, int ref_slice);
579
580 bool Init() override;
581
582 int Read() override;
583
584 void StartPSIBatchMode() override {
585 m_subquery_iterator->StartPSIBatchMode();
586 }
587 void EndPSIBatchModeIfStarted() override {
588 m_subquery_iterator->EndPSIBatchModeIfStarted();
589 }
590 void UnlockRow() override { m_subquery_iterator->UnlockRow(); }
591
592 private:
593 unique_ptr_destroy_only<RowIterator> m_subquery_iterator;
594 Temp_table_param *m_temp_table_param;
595 ha_rows m_row_number;
596 JOIN *const m_join;
597 const int m_output_slice;
598 int m_input_slice;
599
600 // Whether the iterator should generate and provide a row ID. Only true if the
601 // iterator is part of weedout, where the iterator will create a fake row ID
602 // to uniquely identify the rows it produces.
603 const bool m_provide_rowid;
604};
605
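/*
  Construction sketch (illustrative; assumes "tmp_param" and "tmp_table" were
  set up by the optimizer, and that the parent does not need row IDs):

    StreamingIterator stream_it(thd, std::move(subquery_iterator), tmp_param,
                                tmp_table, /*provide_rowid=*/false, join,
                                ref_slice);
*/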
606/**
607 An iterator that wraps a Table_function (e.g. JSON_TABLE) and allows you to
608 iterate over the materialized temporary table. The table is materialized anew
609 for every Init().
610
611 TODO: Just wrapping it is probably not the optimal thing to do;
612 Table_function is highly oriented around materialization, but never caches.
613 Thus, perhaps we should rewrite Table_function to return a RowIterator
614 instead of going through a temporary table.
615 */
616class MaterializedTableFunctionIterator final : public TableRowIterator {
617 public:
618 MaterializedTableFunctionIterator(
619 THD *thd, Table_function *table_function, TABLE *table,
620 unique_ptr_destroy_only<RowIterator> table_iterator);
621
622 bool Init() override;
623 int Read() override { return m_table_iterator->Read(); }
624 void SetNullRowFlag(bool is_null_row) override {
625 m_table_iterator->SetNullRowFlag(is_null_row);
626 }
627
628 void StartPSIBatchMode() override { m_table_iterator->StartPSIBatchMode(); }
629 void EndPSIBatchModeIfStarted() override {
630 m_table_iterator->EndPSIBatchModeIfStarted();
631 }
632
633 // The temporary table is private to us, so there's no need to worry about
634 // locks to other transactions.
635 void UnlockRow() override {}
636
637 private:
638 unique_ptr_destroy_only<RowIterator> m_table_iterator;
639
640 Table_function *m_table_function;
641};
642
643/**
644 Like semijoin materialization, weedout works on the basic idea that a semijoin
645 is just like an inner join as long as we can get rid of the duplicates
646 somehow. (This is advantageous, because inner joins can be reordered, whereas
647 semijoins generally can't.) However, unlike semijoin materialization, weedout
648 removes duplicates after the join, not before it. Consider something like
649
650 SELECT * FROM t1 WHERE a IN ( SELECT b FROM t2 );
651
652 Semijoin materialization solves this by materializing t2, with deduplication,
653 and then joining. Weedout joins t1 to t2 and then leaves only one output row
654 per t1 row. The disadvantage is that this potentially needs to discard more
655 rows; the (potential) advantage is that we deduplicate on t1 instead of t2.
656
657 Weedout, unlike materialization, works in a streaming fashion; rows are output
658 (or discarded) as they come in, with a temporary table used for recording the
659 row IDs we've seen before. (We need to deduplicate on t1's row IDs, not its
660 contents.) See SJ_TMP_TABLE for details about the table format.
661 */
662class WeedoutIterator final : public RowIterator {
663 public:
664 WeedoutIterator(THD *thd, unique_ptr_destroy_only<RowIterator> source,
665 SJ_TMP_TABLE *sj, table_map tables_to_get_rowid_for);
666
667 bool Init() override;
668 int Read() override;
669
670 void SetNullRowFlag(bool is_null_row) override {
671 m_source->SetNullRowFlag(is_null_row);
672 }
673
674 void EndPSIBatchModeIfStarted() override {
675 m_source->EndPSIBatchModeIfStarted();
676 }
677 void UnlockRow() override { m_source->UnlockRow(); }
678
679 private:
680 unique_ptr_destroy_only<RowIterator> m_source;
681 SJ_TMP_TABLE *m_sj;
682 const table_map m_tables_to_get_rowid_for;
683};
684
685/**
686 An iterator that removes consecutive rows that are the same according to
687 a set of items (typically the join key), so-called “loose scan”
688 (not to be confused with “loose index scan”, which is made by the
689 range optimizer). This is similar in spirit to WeedoutIterator above
690 (removing duplicates allows us to treat the semijoin as a normal join),
691 but is much cheaper if the data is already ordered/grouped correctly,
692 as the removal can happen before the join, and it does not need a
693 temporary table.
694 */
695class RemoveDuplicatesIterator final : public RowIterator {
696 public:
697 RemoveDuplicatesIterator(THD *thd,
698 unique_ptr_destroy_only<RowIterator> source,
699 JOIN *join, Item **group_items,
700 int group_items_size);
701
702 bool Init() override;
703 int Read() override;
704
705 void SetNullRowFlag(bool is_null_row) override {
706 m_source->SetNullRowFlag(is_null_row);
707 }
708
709 void StartPSIBatchMode() override { m_source->StartPSIBatchMode(); }
710 void EndPSIBatchModeIfStarted() override {
711 m_source->EndPSIBatchModeIfStarted();
712 }
713 void UnlockRow() override { m_source->UnlockRow(); }
714
715 private:
716 unique_ptr_destroy_only<RowIterator> m_source;
717 Bounds_checked_array<Cached_item *> m_caches;
718 bool m_first_row;
719};
720
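/*
  Construction sketch (illustrative): deduplicating on two group expressions
  that the input is already ordered on:

    Item *group_items[] = {item_a, item_b};
    RemoveDuplicatesIterator dedup_it(thd, std::move(source), join,
                                      group_items, /*group_items_size=*/2);
*/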
721/**
722 Much like RemoveDuplicatesIterator, but works on the basis of a given index
723 (or more accurately, its keypart), not an arbitrary list of grouped fields.
724 This is only used in the non-hypergraph optimizer; the hypergraph optimizer
725 can deal with groupings that come from e.g. sorts.
726 */
727class RemoveDuplicatesOnIndexIterator final : public RowIterator {
728 public:
729 RemoveDuplicatesOnIndexIterator(THD *thd,
730 unique_ptr_destroy_only<RowIterator> source,
731 const TABLE *table, KEY *key, size_t key_len);
732
733 bool Init() override;
734 int Read() override;
735
736 void SetNullRowFlag(bool is_null_row) override {
737 m_source->SetNullRowFlag(is_null_row);
738 }
739
740 void StartPSIBatchMode() override { m_source->StartPSIBatchMode(); }
741 void EndPSIBatchModeIfStarted() override {
742 m_source->EndPSIBatchModeIfStarted();
743 }
744 void UnlockRow() override { m_source->UnlockRow(); }
745
746 private:
747 unique_ptr_destroy_only<RowIterator> m_source;
748 const TABLE *m_table;
749 KEY *m_key;
750 uchar *m_key_buf; // Owned by the THD's MEM_ROOT.
751 const size_t m_key_len;
752 bool m_first_row;
753};
754
755/**
756 An iterator that is semantically equivalent to a semijoin NestedLoopIterator
757 immediately followed by a RemoveDuplicatesOnIndexIterator. It is used to
758 implement the “loose scan” strategy in queries with multiple tables on the
759 inside of a semijoin, like
760
761 ... FROM t1 WHERE ... IN ( SELECT ... FROM t2 JOIN t3 ... )
762
763 In this case, the query tree without this iterator would ostensibly look like
764
765 -> Nested loop join
766 -> Table scan on t1
767 -> Remove duplicates on t2_idx
768 -> Nested loop semijoin
769 -> Index scan on t2 using t2_idx
770 -> Filter (e.g. t3.a = t2.a)
771 -> Table scan on t3
772
773 (t3 will be marked as “first match” on t2 when implementing loose scan,
774 thus the semijoin.)
775
776 First note that we can't put the duplicate removal directly on t2 in this
777 case, as the first t2 row doesn't necessarily match anything in t3, so it
778 needs to be above. However, this is wasteful, because once we find a matching
779 t2/t3 pair, we should stop scanning t3 until we have a new t2.
780
781 NestedLoopSemiJoinWithDuplicateRemovalIterator solves the problem by doing
782 exactly this; it gets a row from the outer side, gets exactly one row from the
783 inner side, and then skips over rows from the outer side (_without_ scanning
784 the inner side) until its keypart changes.
785 */
786class NestedLoopSemiJoinWithDuplicateRemovalIterator final
787 : public RowIterator {
788 public:
789 NestedLoopSemiJoinWithDuplicateRemovalIterator(
790 THD *thd, unique_ptr_destroy_only<RowIterator> source_outer,
791 unique_ptr_destroy_only<RowIterator> source_inner, const TABLE *table,
792 KEY *key, size_t key_len);
793
794 bool Init() override;
795
796 int Read() override;
797
798 void SetNullRowFlag(bool is_null_row) override {
799 m_source_outer->SetNullRowFlag(is_null_row);
800 m_source_inner->SetNullRowFlag(is_null_row);
801 }
802
803 void EndPSIBatchModeIfStarted() override {
804 m_source_outer->EndPSIBatchModeIfStarted();
805 m_source_inner->EndPSIBatchModeIfStarted();
806 }
807
808 void UnlockRow() override {
809 m_source_outer->UnlockRow();
810 m_source_inner->UnlockRow();
811 }
812
813 private:
814 unique_ptr_destroy_only<RowIterator> const m_source_outer;
815 unique_ptr_destroy_only<RowIterator> const m_source_inner;
816
817 const TABLE *m_table_outer;
818 KEY *m_key;
819 uchar *m_key_buf; // Owned by the THD's MEM_ROOT.
820 const size_t m_key_len;
821 bool m_deduplicate_against_previous_row;
822};
823
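/*
  Construction sketch (illustrative), matching the t1/t2/t3 example above:
  t2 is the outer side whose keypart decides when to stop skipping, and the
  filtered scan of t3 is the inner side:

    NestedLoopSemiJoinWithDuplicateRemovalIterator loose_scan_it(
        thd, std::move(t2_iterator), std::move(t3_iterator), t2_table,
        &t2_table->key_info[t2_idx], key_len);
*/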
824/**
825 MaterializeInformationSchemaTableIterator makes sure a given I_S temporary
826 table is materialized (filled out) before we try to scan it.
827 */
828class MaterializeInformationSchemaTableIterator final : public RowIterator {
829 public:
830 MaterializeInformationSchemaTableIterator(
831 THD *thd, unique_ptr_destroy_only<RowIterator> table_iterator,
832 Table_ref *table_list, Item *condition);
833
834 bool Init() override;
835 int Read() override { return m_table_iterator->Read(); }
836
837 void SetNullRowFlag(bool is_null_row) override {
838 m_table_iterator->SetNullRowFlag(is_null_row);
839 }
840
841 void StartPSIBatchMode() override { m_table_iterator->StartPSIBatchMode(); }
842 void EndPSIBatchModeIfStarted() override {
843 m_table_iterator->EndPSIBatchModeIfStarted();
844 }
845
846 // The temporary table is private to us, so there's no need to worry about
847 // locks to other transactions.
848 void UnlockRow() override {}
849
850 private:
851 /// The iterator that reads from the materialized table.
852 unique_ptr_destroy_only<RowIterator> m_table_iterator;
853 Table_ref *m_table_list;
854 Item *m_condition;
855};
856
857/**
858 Takes in two or more iterators and outputs rows from them sequentially
859 (first all rows from the first one, then all from the second one, etc.).
860 Used for implementing UNION ALL, typically together with StreamingIterator.
861 */
862class AppendIterator final : public RowIterator {
863 public:
865 THD *thd,
867
868 bool Init() override;
869 int Read() override;
870
871 void StartPSIBatchMode() override;
872 void EndPSIBatchModeIfStarted() override;
873
874 void SetNullRowFlag(bool is_null_row) override;
875 void UnlockRow() override;
876
877 private:
878 std::vector<unique_ptr_destroy_only<RowIterator>> m_sub_iterators;
879 size_t m_current_iterator_index;
880 bool m_pfs_batch_mode_enabled;
881};
882
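/*
  Construction sketch (illustrative) for a two-block UNION ALL, where each
  block is typically wrapped in a StreamingIterator further down the tree:

    std::vector<unique_ptr_destroy_only<RowIterator>> parts;
    parts.push_back(std::move(first_block_iterator));
    parts.push_back(std::move(second_block_iterator));
    AppendIterator union_all_it(thd, std::move(parts));
*/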
883#endif // SQL_ITERATORS_COMPOSITE_ITERATORS_H_