MySQL 8.0.37
Source Code Documentation
composite_iterators.h
#ifndef SQL_ITERATORS_COMPOSITE_ITERATORS_H_
#define SQL_ITERATORS_COMPOSITE_ITERATORS_H_

/* Copyright (c) 2018, 2024, Oracle and/or its affiliates.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License, version 2.0,
   as published by the Free Software Foundation.

   This program is designed to work with certain software (including
   but not limited to OpenSSL) that is licensed under separate terms,
   as designated in a particular file or component or in included license
   documentation. The authors of MySQL hereby grant you an additional
   permission to link the program and your derivative works with the
   separately licensed software that they have either included with
   the program or referenced in the documentation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License, version 2.0, for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
/**
  @file composite_iterators.h

  A composite row iterator is one that takes in one or more existing iterators
  and processes their rows in some interesting way. They are usually not bound
  to a single table or similar, but are the inner (non-leaf) nodes of the
  iterator execution tree. They consistently own their source iterator,
  although not its memory (since we never allocate row iterators on the heap;
  they usually live on a MEM_ROOT). This means that in the end, you'll end up
  with a single root iterator which then owns everything else recursively.

  SortingIterator and the two window iterators are also composite iterators,
  but are defined in their own files.
 */

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "my_alloc.h"
#include "my_base.h"
#include "my_inttypes.h"
#include "my_table_map.h"
#include "sql/iterators/row_iterator.h"
#include "sql/join_type.h"
#include "sql/mem_root_array.h"
#include "sql/pack_rows.h"
#include "sql/table.h"
#include "sql_string.h"

class Cached_item;
class FollowTailIterator;
class Item;
class JOIN;
class KEY;
struct MaterializePathParameters;
class Query_expression;
class SJ_TMP_TABLE;
class THD;
class Table_function;
class Temp_table_param;

/**
  An iterator that takes in a stream of rows and passes through only those that
  meet some criteria (i.e., a condition evaluates to true). This is typically
  used for WHERE/HAVING.
 */
class FilterIterator final : public RowIterator {
 public:
  FilterIterator(THD *thd, unique_ptr_destroy_only<RowIterator> source,
                 Item *condition)
      : RowIterator(thd), m_source(std::move(source)), m_condition(condition) {}

  bool Init() override { return m_source->Init(); }

  int Read() override;

  void SetNullRowFlag(bool is_null_row) override {
    m_source->SetNullRowFlag(is_null_row);
  }

  void StartPSIBatchMode() override { m_source->StartPSIBatchMode(); }
  void EndPSIBatchModeIfStarted() override {
    m_source->EndPSIBatchModeIfStarted();
  }
  void UnlockRow() override { m_source->UnlockRow(); }

 private:
  unique_ptr_destroy_only<RowIterator> m_source;
  Item *m_condition;
};
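
// Documentation-only sketch (added by the editor, not part of the original
// header): a minimal illustration of the Init()/Read() protocol and of how a
// composite iterator owns and forwards to its source, in the spirit of
// FilterIterator above. All names are invented, only the standard library is
// used, and the block is inside #if 0 so it is never compiled.
#if 0
#include <memory>
#include <vector>

// Stand-in for the RowIterator protocol: Read() returns 0 for a row, -1 at
// end of stream.
struct ToyIterator {
  virtual ~ToyIterator() = default;
  virtual bool Init() = 0;
  virtual int Read() = 0;
  int current = 0;  // "Current row", by analogy with the table record buffer.
};

struct ToyVectorScan final : ToyIterator {
  explicit ToyVectorScan(std::vector<int> rows) : m_rows(std::move(rows)) {}
  bool Init() override {
    m_pos = 0;
    return false;
  }
  int Read() override {
    if (m_pos == m_rows.size()) return -1;  // EOF.
    current = m_rows[m_pos++];
    return 0;
  }
  std::vector<int> m_rows;
  size_t m_pos = 0;
};

// The FilterIterator analogue: owns its source and passes through only rows
// for which the condition holds, looping past the ones that do not.
struct ToyFilter final : ToyIterator {
  ToyFilter(std::unique_ptr<ToyIterator> source, bool (*condition)(int))
      : m_source(std::move(source)), m_condition(condition) {}
  bool Init() override { return m_source->Init(); }
  int Read() override {
    for (;;) {
      const int err = m_source->Read();
      if (err != 0) return err;  // EOF or error: propagate upwards.
      if (m_condition(m_source->current)) {
        current = m_source->current;
        return 0;  // Condition evaluated to true: emit the row.
      }
      // Condition failed: skip this row and read the next one.
    }
  }
  std::unique_ptr<ToyIterator> m_source;
  bool (*m_condition)(int);
};
#endif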

/**
  Handles LIMIT and/or OFFSET; Init() eats the first "offset" rows, and Read()
  stops as soon as it's seen "limit" rows (including any skipped by offset).
 */
class LimitOffsetIterator final : public RowIterator {
 public:
  /**
    @param thd Thread context
    @param source Row source
    @param limit Maximum number of rows to read, including the ones skipped by
      offset. Can be HA_POS_ERROR for no limit.
    @param offset Number of initial rows to skip. Can be 0 for no offset.
    @param count_all_rows If true, the query will run to completion to get
      more accurate numbers for skipped_rows, so you will not get any
      performance benefits of early end.
    @param reject_multiple_rows True if a derived table transformed from a
      scalar subquery needs a run-time cardinality check
    @param skipped_rows If not nullptr, is incremented for each row skipped by
      offset or limit.
   */
  LimitOffsetIterator(THD *thd, unique_ptr_destroy_only<RowIterator> source,
                      ha_rows limit, ha_rows offset, bool count_all_rows,
                      bool reject_multiple_rows, ha_rows *skipped_rows)
      : RowIterator(thd),
        m_source(std::move(source)),
        m_limit(limit),
        m_offset(offset),
        m_count_all_rows(count_all_rows),
        m_reject_multiple_rows(reject_multiple_rows),
        m_skipped_rows(skipped_rows) {
    if (count_all_rows) {
      assert(m_skipped_rows != nullptr);
    }
  }

  bool Init() override;

  int Read() override;

  void SetNullRowFlag(bool is_null_row) override {
    m_source->SetNullRowFlag(is_null_row);
  }

  void StartPSIBatchMode() override { m_source->StartPSIBatchMode(); }
  void EndPSIBatchModeIfStarted() override {
    m_source->EndPSIBatchModeIfStarted();
  }
  void UnlockRow() override { m_source->UnlockRow(); }

 private:
  unique_ptr_destroy_only<RowIterator> m_source;

  // Note: The number of seen rows starts off at m_limit if we have OFFSET,
  // which means we don't need separate LIMIT and OFFSET tests on the
  // fast path of Read().
  ha_rows m_seen_rows;

  /**
     Whether we have OFFSET rows that we still need to skip.
   */
  bool m_needs_offset;

  const ha_rows m_limit, m_offset;
  const bool m_count_all_rows;
  const bool m_reject_multiple_rows;
  ha_rows *m_skipped_rows;
};
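
// Documentation-only sketch (added by the editor, not part of the original
// header): the LIMIT/OFFSET semantics documented above, i.e. "limit" counts
// every consumed row, including the ones swallowed by "offset". Invented
// names, standard library only, never compiled (#if 0).
#if 0
#include <cstdint>
#include <vector>

std::vector<int> ApplyLimitOffset(const std::vector<int> &input,
                                  uint64_t limit, uint64_t offset) {
  std::vector<int> out;
  uint64_t seen_rows = 0;
  for (int row : input) {
    if (seen_rows >= limit) break;  // Seen "limit" rows in total: stop early.
    if (seen_rows >= offset) out.push_back(row);  // Past the offset: emit.
    ++seen_rows;  // Rows eaten by the offset still count towards the limit.
  }
  return out;
}
// E.g. limit = 5, offset = 2 over {10,11,12,13,14,15,16} yields {12,13,14}.
#endif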

/**
  Handles aggregation (typically used for GROUP BY) for the case where the rows
  are already properly grouped coming in, i.e., all rows that are supposed to
  be part of the same group are adjacent in the input stream. (This could be
  because they were sorted earlier, because we are scanning an index that
  already gives us the rows in a group-compatible order, or because there is no
  grouping.)

  AggregateIterator needs to be able to save and restore rows; it doesn't know
  when a group ends until it's seen the first row that is part of the _next_
  group. When that happens, it needs to tuck away that next row, and then
  restore the previous row so that the output row gets the correct grouped
  values. A simple example, doing SELECT a, SUM(b) FROM t1 GROUP BY a:

    t1.a  t1.b                                           SUM(b)
     1     1     <-- first row, save it                    1
     1     2                                               3
     1     3                                               6
     2     1     <-- group changed, save row
    [1     1]    <-- restore first row, output             6
                     reset aggregate                  -->  0
    [2     1]    <-- restore new row, process it           1
     2    10                                              11
                 <-- EOF, output                          11

  To save and restore rows like this, it uses the infrastructure from
  pack_rows.h to pack and unpack all relevant rows into record[0] of every
  input table. (Currently, there can only be one input table, but this may
  very well change in the future.) It would be nice to have a more abstract
  concept of sending a row around and taking copies of it if needed, as
  opposed to it implicitly staying in the table's buffer. (This would also
  solve some issues in EQRefIterator and when synthesizing NULL rows for
  outer joins.) However, that's a large refactoring.
 */
class AggregateIterator final : public RowIterator {
 public:
  AggregateIterator(THD *thd, unique_ptr_destroy_only<RowIterator> source,
                    JOIN *join, pack_rows::TableCollection tables, bool rollup);

  bool Init() override;
  int Read() override;
  void SetNullRowFlag(bool is_null_row) override {
    m_source->SetNullRowFlag(is_null_row);
  }

  void StartPSIBatchMode() override { m_source->StartPSIBatchMode(); }
  void EndPSIBatchModeIfStarted() override {
    m_source->EndPSIBatchModeIfStarted();
  }
  void UnlockRow() override {
    // Most likely, HAVING failed. Ideally, we'd like to backtrack and
    // unlock all rows that went into this aggregate, but we can't do that,
    // and we also can't unlock the _current_ row, since that belongs to a
    // different group. Thus, do nothing.
  }

 private:
  enum {
    READING_FIRST_ROW,
    LAST_ROW_STARTED_NEW_GROUP,
    OUTPUTTING_ROLLUP_ROWS,
    DONE_OUTPUTTING_ROWS
  } m_state;

  unique_ptr_destroy_only<RowIterator> m_source;

  /**
    The join we are part of. It would be nicer not to rely on this,
    but we need a large number of members from there, like which
    aggregate functions we have, the THD, temporary table parameters
    and so on.
   */
  JOIN *m_join = nullptr;

  /// Whether we have seen the last input row.
  bool m_seen_eof;

  /**
    Used to save NULL information in the specific case where we have
    zero input rows.
   */
  table_map m_save_nullinfo;

  /// Whether this is a rollup query.
  const bool m_rollup;

  /**
    For rollup: The index of the first group item that did _not_ change when we
    last switched groups. E.g., if we have group fields A,B,C,D and then switch
    to group A,B,E,D, this value will become 1 (which means that we need
    to output rollup rows for 2 -- A,B,E,NULL -- and then 1 -- A,B,NULL,NULL).
    m_current_rollup_position will count down from the end until it becomes
    less than this value.

    If we do not have rollup, this value is perennially zero.
   */
  int m_last_unchanged_group_item_idx;

  /**
    If we are in state OUTPUTTING_ROLLUP_ROWS, where we are in the iteration.
    This value will start at the index of the last group expression and then
    count backwards down to and including m_last_unchanged_group_item_idx.
    It is used to communicate to the rollup group items whether to turn
    themselves into NULLs, and the sum items which of their sums to output.
   */
  int m_current_rollup_position;

  /**
    The list of tables we are reading from; they are the ones for which we need
    to save and restore rows.
   */
  pack_rows::TableCollection m_tables;

  /// Packed version of the first row in the group we are currently processing.
  String m_first_row_this_group;

  /**
    If applicable, packed version of the first row in the _next_ group. This is
    used only in the LAST_ROW_STARTED_NEW_GROUP state; we just saw a row that
    didn't belong to the current group, so we saved it here and went to output
    a group. On the next Read() call, we need to process this deferred row
    first of all.

    Even when not in use, this string contains a buffer that is large enough to
    pack a full row into, sans blobs. (If blobs are present,
    StoreFromTableBuffers() will automatically allocate more space if needed.)
   */
  String m_first_row_next_group;

  /**
    The slice we're setting when returning rows. See the comment in the
    constructor.
   */
  int m_output_slice;

  void SetRollupLevel(int level);
};
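
// Documentation-only sketch (added by the editor, not part of the original
// header): streaming aggregation over pre-grouped input, mirroring the
// SELECT a, SUM(b) FROM t1 GROUP BY a example above. Because this toy copies
// values out of the rows, it does not need the pack_rows.h save/restore
// machinery the real iterator uses; the group-change handling is the same
// idea. Invented names, standard library only, never compiled (#if 0).
#if 0
#include <utility>
#include <vector>

std::vector<std::pair<int, int>> GroupSum(
    const std::vector<std::pair<int, int>> &rows) {  // (a, b), grouped on a.
  std::vector<std::pair<int, int>> out;
  bool have_group = false;
  int group_key = 0;
  int sum = 0;
  for (const auto &[a, b] : rows) {
    if (have_group && a != group_key) {
      out.emplace_back(group_key, sum);  // Group changed: output the old one.
      sum = 0;                           // "Reset aggregate."
    }
    have_group = true;
    group_key = a;  // This row starts or continues the current group.
    sum += b;
  }
  if (have_group) out.emplace_back(group_key, sum);  // EOF: flush last group.
  return out;
}
// For {(1,1),(1,2),(1,3),(2,1),(2,10)} this returns {(1,6),(2,11)}, matching
// the worked example in the comment above.
#endif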

/**
  A simple nested loop join, taking in two iterators (left/outer and
  right/inner) and joining them together. This may, of course, scan the inner
  iterator many times. It is currently the only form of join we have.

  The iterator works as a state machine, where the state records whether we
  need to read a new outer row or not, and whether we've seen any rows from
  the inner iterator at all (if not, an outer join needs to synthesize a new
  NULL row).

  The iterator takes care of activating performance schema batch mode on the
  right iterator if needed; this is typically only used if it is the innermost
  table in the entire join (where the gains from turning on batch mode are the
  largest, and the accuracy loss from turning it off is the least critical).
 */
class NestedLoopIterator final : public RowIterator {
 public:
  NestedLoopIterator(THD *thd,
                     unique_ptr_destroy_only<RowIterator> source_outer,
                     unique_ptr_destroy_only<RowIterator> source_inner,
                     JoinType join_type, bool pfs_batch_mode)
      : RowIterator(thd),
        m_source_outer(std::move(source_outer)),
        m_source_inner(std::move(source_inner)),
        m_join_type(join_type),
        m_pfs_batch_mode(pfs_batch_mode) {
    assert(m_source_outer != nullptr);
    assert(m_source_inner != nullptr);

    // Batch mode makes no sense for anti- or semijoins, since they should only
    // be reading one row.
    if (join_type == JoinType::ANTI || join_type == JoinType::SEMI) {
      assert(!pfs_batch_mode);
    }
  }

  bool Init() override;

  int Read() override;

  void SetNullRowFlag(bool is_null_row) override {
    // TODO: write something here about why we can't do this lazily.
    m_source_outer->SetNullRowFlag(is_null_row);
    m_source_inner->SetNullRowFlag(is_null_row);
  }

  void EndPSIBatchModeIfStarted() override {
    m_source_outer->EndPSIBatchModeIfStarted();
    m_source_inner->EndPSIBatchModeIfStarted();
  }

  void UnlockRow() override {
    // Since we don't know which condition caused the row to be rejected,
    // we can't know whether we could also unlock the outer row
    // (it may still be used as parts of other joined rows).
    if (m_state == READING_FIRST_INNER_ROW || m_state == READING_INNER_ROWS) {
      m_source_inner->UnlockRow();
    }
  }

 private:
  enum {
    NEEDS_OUTER_ROW,
    READING_FIRST_INNER_ROW,
    READING_INNER_ROWS,
    END_OF_ROWS
  } m_state;

  unique_ptr_destroy_only<RowIterator> const m_source_outer;
  unique_ptr_destroy_only<RowIterator> const m_source_inner;
  const JoinType m_join_type;

  /** Whether to use batch mode when scanning the inner iterator. */
  const bool m_pfs_batch_mode;
};
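
// Documentation-only sketch (added by the editor, not part of the original
// header): a nested loop LEFT OUTER join over two in-memory inputs. The
// "matched" flag plays the role of the state machine above that remembers
// whether any inner row was seen for the current outer row, so that a
// NULL-complemented row can be synthesized when none was. Invented names,
// standard library only, never compiled (#if 0).
#if 0
#include <optional>
#include <utility>
#include <vector>

std::vector<std::pair<int, std::optional<int>>> NestedLoopLeftJoin(
    const std::vector<int> &outer, const std::vector<int> &inner) {
  std::vector<std::pair<int, std::optional<int>>> out;
  for (int o : outer) {
    bool matched = false;
    for (int i : inner) {  // The inner side is rescanned for every outer row.
      if (o == i) {
        out.emplace_back(o, i);
        matched = true;
      }
    }
    if (!matched) out.emplace_back(o, std::nullopt);  // NULL-complemented row.
  }
  return out;
}
#endif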

/**
  An iterator that helps invalidating caches. Every time a row passes through
  it or it changes state in any other way, it increments its “generation”
  counter. This allows MaterializeIterator to see whether any of its
  dependencies has changed, and then force a rematerialization -- this is
  typically used for LATERAL tables, where we're joining in a derived table
  that depends on something earlier in the join.
 */
class CacheInvalidatorIterator final : public RowIterator {
 public:
  CacheInvalidatorIterator(THD *thd,
                           unique_ptr_destroy_only<RowIterator> source_iterator,
                           const std::string &name)
      : RowIterator(thd),
        m_source_iterator(std::move(source_iterator)),
        m_name(name) {}

  bool Init() override {
    ++m_generation;
    return m_source_iterator->Init();
  }

  int Read() override {
    ++m_generation;
    return m_source_iterator->Read();
  }

  void SetNullRowFlag(bool is_null_row) override {
    ++m_generation;
    m_source_iterator->SetNullRowFlag(is_null_row);
  }

  void UnlockRow() override { m_source_iterator->UnlockRow(); }

  int64_t generation() const { return m_generation; }
  std::string name() const { return m_name; }

 private:
  unique_ptr_destroy_only<RowIterator> m_source_iterator;
  int64_t m_generation = 0;
  std::string m_name;
};
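
// Documentation-only sketch (added by the editor, not part of the original
// header): how a consumer can use the generation counter exposed above. The
// consumer remembers the generation it last materialized at; if the
// invalidator has incremented since then, the cached result may depend on
// stale rows and must be rebuilt. Invented names, never compiled (#if 0).
#if 0
#include <cstdint>

struct ToyCachedDerivedTable {
  int64_t materialized_at_generation = -1;

  bool NeedsRematerialization(int64_t invalidator_generation) const {
    return materialized_at_generation != invalidator_generation;
  }
  void MarkMaterialized(int64_t invalidator_generation) {
    materialized_at_generation = invalidator_generation;
  }
};
#endif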

namespace materialize_iterator {
/**
   A query block to be materialized by MaterializeIterator.
   (@see MaterializeIterator for details.)
*/
struct QueryBlock {
  /// The iterator to read the actual rows from.
  unique_ptr_destroy_only<RowIterator> subquery_iterator;

  /// Used only for optimizer trace.
  int select_number;

  /// The JOIN that this query block represents. Used for performance
  /// schema batch mode: When materializing a query block that consists of
  /// a single table, MaterializeIterator needs to set up schema batch mode,
  /// since there is no nested loop iterator to do it. (This is similar to
  /// what ExecuteIteratorQuery() needs to do at the top level.)
  JOIN *join;

  /// If true, unique constraint checking via hash key is disabled
  /// when materializing this query block (i.e., we simply avoid calling
  /// check_unique_constraint() for each row). Used when materializing
  /// UNION DISTINCT and UNION ALL parts into the same table.
  /// We'd like to just use a unique constraint via unique index instead,
  /// but there might be other indexes on the destination table
  /// that we'd like to keep, and the implementation doesn't allow
  /// disabling only one index.
  ///
  /// If you use this on a query block, doing_hash_deduplication()
  /// must be true.
  bool disable_deduplication_by_hash_field;

  /// If set to false, the Field objects in the output row are
  /// presumed already to be filled out. This is the case iff
  /// there's a windowing iterator earlier in the chain.
  bool copy_items;

  /// The number of operands (i.e. blocks) involved in the set operation:
  /// used for INTERSECT to determine if a value is present in all operands
  ulonglong m_total_operands;
  /// The current operand (i.e. block) number, starting at zero. We use this
  /// for INTERSECT and EXCEPT materialization operand.
  ulonglong m_operand_idx;
  /// Used for EXCEPT computation: the index of the first operand involved in
  /// a N-ary except operation which has DISTINCT. This is significant for
  /// calculating whether to set the counter to zero or just decrement it
  /// when we see a right side operand.
  uint m_first_distinct;

  /// If copy_items is true, used for copying the Field objects
  /// into the temporary table row. Otherwise unused.
  Temp_table_param *temp_table_param;

  // Whether this query block is a recursive reference back to the
  // output of the materialization.
  bool is_recursive_reference;

  // If is_recursive_reference is true, contains the FollowTailIterator
  // in the query block (there can be at most one recursive reference
  // in a join list, as per the SQL standard, so there should be exactly one).
  // Used for informing the iterators about various shared state in the
  // materialization (including coordinating rematerializations).
  FollowTailIterator *recursive_reader;
};

/**
  Create an iterator that materializes a set of rows into a temporary table
  and sets up a (pre-existing) iterator to access that.
  @see MaterializeIterator.

  @param thd Thread handler.
  @param query_blocks_to_materialize List of query blocks to materialize.
  @param path_params MaterializePath settings.
  @param table_iterator Iterator used for accessing the temporary table
    after materialization.
  @param join
    When materializing within the same JOIN (e.g., into a temporary table
    before sorting), as opposed to a derived table or a CTE, we may need
    to change the slice on the join before returning rows from the result
    table. If so, join and ref_slice would need to be set, and
    query_blocks_to_materialize should contain only one member, with the same
    join.
  @return the iterator.
*/
RowIterator *CreateIterator(
    THD *thd,
    Mem_root_array<materialize_iterator::QueryBlock> query_blocks_to_materialize,
    const MaterializePathParameters *path_params,
    unique_ptr_destroy_only<RowIterator> table_iterator, JOIN *join);

}  // namespace materialize_iterator

namespace temptable_aggregate_iterator {
/**
  Create an iterator that aggregates the output rows from another iterator
  into a temporary table and then sets up a (pre-existing) iterator to
  access the temporary table.
  @see TemptableAggregateIterator.

  @param thd Thread handler.
  @param subquery_iterator input to aggregation.
  @param temp_table_param temporary table settings.
  @param table_iterator Iterator used for scanning the temporary table
    after materialization.
  @param table the temporary table.
  @param join the JOIN in which we aggregate.
  @param ref_slice the slice to set when accessing temporary table;
    used if anything upstream wants to evaluate values based on its contents.
  @return the iterator.
*/
RowIterator *CreateIterator(
    THD *thd, unique_ptr_destroy_only<RowIterator> subquery_iterator,
    Temp_table_param *temp_table_param, TABLE *table,
    unique_ptr_destroy_only<RowIterator> table_iterator, JOIN *join,
    int ref_slice);

}  // namespace temptable_aggregate_iterator

/**
  StreamingIterator is a minimal version of MaterializeIterator that does not
  actually materialize; instead, every Read() just forwards the call to the
  subquery iterator and does the required copying from one set of fields to
  another.

  It is used when the optimizer would normally set up a materialization,
  but you don't actually need one, i.e., you don't want to read the rows
  multiple times after writing them, and you don't want to access them by
  index (only a single table scan). It also takes care of setting the NULL
  row flag on the temporary table.
 */
class StreamingIterator final : public TableRowIterator {
 public:
  /**
    @param thd Thread handle.
    @param subquery_iterator The iterator to read rows from.
    @param temp_table_param Parameters for the temp table.
    @param table The table we are streaming through. Will never actually
      be written to, but its fields will be used.
    @param provide_rowid If true, generate a row ID for each row we stream.
      This is used if the parent needs row IDs for deduplication, in particular
      weedout.
    @param join See MaterializeIterator.
    @param ref_slice See MaterializeIterator.
   */
  StreamingIterator(THD *thd,
                    unique_ptr_destroy_only<RowIterator> subquery_iterator,
                    Temp_table_param *temp_table_param, TABLE *table,
                    bool provide_rowid, JOIN *join, int ref_slice);

  bool Init() override;

  int Read() override;

  void StartPSIBatchMode() override {
    m_subquery_iterator->StartPSIBatchMode();
  }
  void EndPSIBatchModeIfStarted() override {
    m_subquery_iterator->EndPSIBatchModeIfStarted();
  }
  void UnlockRow() override { m_subquery_iterator->UnlockRow(); }

 private:
  unique_ptr_destroy_only<RowIterator> m_subquery_iterator;
  Temp_table_param *m_temp_table_param;
  ha_rows m_row_number;
  JOIN *const m_join;
  const int m_output_slice;
  int m_input_slice;

  // Whether the iterator should generate and provide a row ID. Only true if
  // the iterator is part of weedout, where the iterator will create a fake
  // row ID to uniquely identify the rows it produces.
  const bool m_provide_rowid;
};

/**
  An iterator that wraps a Table_function (e.g. JSON_TABLE) and allows you to
  iterate over the materialized temporary table. The table is materialized anew
  for every Init().

  TODO: Just wrapping it is probably not the optimal thing to do;
  Table_function is highly oriented around materialization, but never caches.
  Thus, perhaps we should rewrite Table_function to return a RowIterator
  instead of going through a temporary table.
 */
class MaterializedTableFunctionIterator final : public TableRowIterator {
 public:
  MaterializedTableFunctionIterator(
      THD *thd, Table_function *table_function, TABLE *table,
      unique_ptr_destroy_only<RowIterator> table_iterator);

  bool Init() override;
  int Read() override { return m_table_iterator->Read(); }
  void SetNullRowFlag(bool is_null_row) override {
    m_table_iterator->SetNullRowFlag(is_null_row);
  }

  void StartPSIBatchMode() override { m_table_iterator->StartPSIBatchMode(); }
  void EndPSIBatchModeIfStarted() override {
    m_table_iterator->EndPSIBatchModeIfStarted();
  }

  // The temporary table is private to us, so there's no need to worry about
  // locks to other transactions.
  void UnlockRow() override {}

 private:
  unique_ptr_destroy_only<RowIterator> m_table_iterator;

  Table_function *m_table_function;
};

/**
  Like semijoin materialization, weedout works on the basic idea that a
  semijoin is just like an inner join as long as we can get rid of the
  duplicates somehow. (This is advantageous, because inner joins can be
  reordered, whereas semijoins generally can't.) However, unlike semijoin
  materialization, weedout removes duplicates after the join, not before it.
  Consider something like

    SELECT * FROM t1 WHERE a IN ( SELECT b FROM t2 );

  Semijoin materialization solves this by materializing t2, with deduplication,
  and then joining. Weedout joins t1 to t2 and then leaves only one output row
  per t1 row. The disadvantage is that this potentially needs to discard more
  rows; the (potential) advantage is that we deduplicate on t1 instead of t2.

  Weedout, unlike materialization, works in a streaming fashion; rows are
  output (or discarded) as they come in, with a temporary table used for
  recording the row IDs we've seen before. (We need to deduplicate on t1's
  row IDs, not its contents.) See SJ_TMP_TABLE for details about the table
  format.
 */
class WeedoutIterator final : public RowIterator {
 public:
  WeedoutIterator(THD *thd, unique_ptr_destroy_only<RowIterator> source,
                  SJ_TMP_TABLE *sj, table_map tables_to_get_rowid_for);

  bool Init() override;
  int Read() override;

  void SetNullRowFlag(bool is_null_row) override {
    m_source->SetNullRowFlag(is_null_row);
  }

  void EndPSIBatchModeIfStarted() override {
    m_source->EndPSIBatchModeIfStarted();
  }
  void UnlockRow() override { m_source->UnlockRow(); }

 private:
  unique_ptr_destroy_only<RowIterator> m_source;
  SJ_TMP_TABLE *m_sj;
  const table_map m_tables_to_get_rowid_for;
};
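
// Documentation-only sketch (added by the editor, not part of the original
// header): weedout in miniature. Run the semijoin as a plain inner join, then
// keep at most one output row per outer row, identified by its row ID. The
// real iterator records the row IDs in a temporary table (see SJ_TMP_TABLE);
// a std::set stands in for that here. Invented names, standard library only,
// never compiled (#if 0).
#if 0
#include <set>
#include <utility>
#include <vector>

std::vector<int> SemiJoinByWeedout(
    const std::vector<std::pair<int, int>> &t1,  // (rowid, a)
    const std::vector<int> &t2) {                // b values
  std::set<int> seen_rowids;
  std::vector<int> out;
  for (const auto &[rowid, a] : t1) {
    for (int b : t2) {
      if (a != b) continue;                    // Join condition: t1.a = t2.b.
      if (seen_rowids.insert(rowid).second) {  // First match for this t1 row?
        out.push_back(a);                      // Emit it; later ones weeded.
      }
    }
  }
  return out;
}
#endif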

/**
  An iterator that removes consecutive rows that are the same according to
  a set of items (typically the join key), so-called “loose scan”
  (not to be confused with “loose index scan”, which is made by the
  range optimizer). This is similar in spirit to WeedoutIterator above
  (removing duplicates allows us to treat the semijoin as a normal join),
  but is much cheaper if the data is already ordered/grouped correctly,
  as the removal can happen before the join, and it does not need a
  temporary table.
 */
class RemoveDuplicatesIterator final : public RowIterator {
 public:
  RemoveDuplicatesIterator(THD *thd,
                           unique_ptr_destroy_only<RowIterator> source,
                           JOIN *join, Item **group_items,
                           int group_items_size);

  bool Init() override;
  int Read() override;

  void SetNullRowFlag(bool is_null_row) override {
    m_source->SetNullRowFlag(is_null_row);
  }

  void StartPSIBatchMode() override { m_source->StartPSIBatchMode(); }
  void EndPSIBatchModeIfStarted() override {
    m_source->EndPSIBatchModeIfStarted();
  }
  void UnlockRow() override { m_source->UnlockRow(); }

 private:
  unique_ptr_destroy_only<RowIterator> m_source;
  Bounds_checked_array<Cached_item *> m_caches;
  bool m_first_row;
};
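
// Documentation-only sketch (added by the editor, not part of the original
// header): loose-scan style duplicate removal. Because the input is already
// grouped on the key, comparing each row's key against the previous one is
// enough; no temporary table or hashing is needed, which is the cheapness
// argument made above. Invented names, standard library only, never compiled
// (#if 0).
#if 0
#include <vector>

std::vector<int> RemoveConsecutiveDuplicates(const std::vector<int> &keys) {
  std::vector<int> out;
  for (int key : keys) {
    if (!out.empty() && out.back() == key) continue;  // Same group: skip row.
    out.push_back(key);  // Key changed (or first row): pass the row through.
  }
  return out;
}
#endif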

/**
  Much like RemoveDuplicatesIterator, but works on the basis of a given index
  (or more accurately, its keypart), not an arbitrary list of grouped fields.
  This is only used in the non-hypergraph optimizer; the hypergraph optimizer
  can deal with groupings that come from e.g. sorts.
 */
class RemoveDuplicatesOnIndexIterator final : public RowIterator {
 public:
  RemoveDuplicatesOnIndexIterator(THD *thd,
                                  unique_ptr_destroy_only<RowIterator> source,
                                  const TABLE *table, KEY *key, size_t key_len);

  bool Init() override;
  int Read() override;

  void SetNullRowFlag(bool is_null_row) override {
    m_source->SetNullRowFlag(is_null_row);
  }

  void StartPSIBatchMode() override { m_source->StartPSIBatchMode(); }
  void EndPSIBatchModeIfStarted() override {
    m_source->EndPSIBatchModeIfStarted();
  }
  void UnlockRow() override { m_source->UnlockRow(); }

 private:
  unique_ptr_destroy_only<RowIterator> m_source;
  const TABLE *m_table;
  KEY *m_key;
  uchar *m_key_buf;  // Owned by the THD's MEM_ROOT.
  const size_t m_key_len;
  bool m_first_row;
};

/**
  An iterator that is semantically equivalent to a semijoin NestedLoopIterator
  immediately followed by a RemoveDuplicatesOnIndexIterator. It is used to
  implement the “loose scan” strategy in queries with multiple tables on the
  inside of a semijoin, like

    ... FROM t1 WHERE ... IN ( SELECT ... FROM t2 JOIN t3 ... )

  In this case, the query tree without this iterator would ostensibly look like

    -> Nested loop join
       -> Table scan on t1
       -> Remove duplicates on t2_idx
          -> Nested loop semijoin
             -> Index scan on t2 using t2_idx
             -> Filter (e.g. t3.a = t2.a)
                -> Table scan on t3

  (t3 will be marked as “first match” on t2 when implementing loose scan,
  thus the semijoin.)

  First note that we can't put the duplicate removal directly on t2 in this
  case, as the first t2 row doesn't necessarily match anything in t3, so it
  needs to be above. However, this is wasteful, because once we find a matching
  t2/t3 pair, we should stop scanning t3 until we have a new t2.

  NestedLoopSemiJoinWithDuplicateRemovalIterator solves the problem by doing
  exactly this; it gets a row from the outer side, gets exactly one row from
  the inner side, and then skips over rows from the outer side (_without_
  scanning the inner side) until its keypart changes.
 */
class NestedLoopSemiJoinWithDuplicateRemovalIterator final
    : public RowIterator {
 public:
  NestedLoopSemiJoinWithDuplicateRemovalIterator(
      THD *thd, unique_ptr_destroy_only<RowIterator> source_outer,
      unique_ptr_destroy_only<RowIterator> source_inner, const TABLE *table,
      KEY *key, size_t key_len);

  bool Init() override;

  int Read() override;

  void SetNullRowFlag(bool is_null_row) override {
    m_source_outer->SetNullRowFlag(is_null_row);
    m_source_inner->SetNullRowFlag(is_null_row);
  }

  void EndPSIBatchModeIfStarted() override {
    m_source_outer->EndPSIBatchModeIfStarted();
    m_source_inner->EndPSIBatchModeIfStarted();
  }

  void UnlockRow() override {
    m_source_outer->UnlockRow();
    m_source_inner->UnlockRow();
  }

 private:
  unique_ptr_destroy_only<RowIterator> const m_source_outer;
  unique_ptr_destroy_only<RowIterator> const m_source_inner;

  const TABLE *m_table_outer;
  KEY *m_key;
  uchar *m_key_buf;  // Owned by the THD's MEM_ROOT.
  const size_t m_key_len;
  bool m_deduplicate_against_previous_row;
};
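
// Documentation-only sketch (added by the editor, not part of the original
// header): the combined "semijoin + duplicate removal on key" behaviour
// described above, in one pass. For each new key on the deduplicating outer
// side we probe the inner side for at most one match, then skip outer rows
// without touching the inner side until the key changes. Invented names,
// standard library only, never compiled (#if 0).
#if 0
#include <optional>
#include <vector>

std::vector<int> LooseScanSemiJoin(
    const std::vector<int> &outer_keys,  // Ordered on the key.
    const std::vector<int> &inner) {
  std::vector<int> out;
  std::optional<int> previous_key;
  for (int key : outer_keys) {
    if (previous_key && *previous_key == key) continue;  // Same keypart:
    previous_key = key;                                   // skip outer row.
    for (int candidate : inner) {
      if (candidate == key) {  // At most one inner row is fetched per key.
        out.push_back(key);
        break;                 // Semijoin: one match is enough.
      }
    }
  }
  return out;
}
#endif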

/**
  MaterializeInformationSchemaTableIterator makes sure a given I_S temporary
  table is materialized (filled out) before we try to scan it.
 */
class MaterializeInformationSchemaTableIterator final : public RowIterator {
 public:
  MaterializeInformationSchemaTableIterator(
      THD *thd, unique_ptr_destroy_only<RowIterator> table_iterator,
      Table_ref *table_list, Item *condition);

  bool Init() override;
  int Read() override { return m_table_iterator->Read(); }

  void SetNullRowFlag(bool is_null_row) override {
    m_table_iterator->SetNullRowFlag(is_null_row);
  }

  void StartPSIBatchMode() override { m_table_iterator->StartPSIBatchMode(); }
  void EndPSIBatchModeIfStarted() override {
    m_table_iterator->EndPSIBatchModeIfStarted();
  }

  // The temporary table is private to us, so there's no need to worry about
  // locks to other transactions.
  void UnlockRow() override {}

 private:
  /// The iterator that reads from the materialized table.
  unique_ptr_destroy_only<RowIterator> m_table_iterator;
  Table_ref *m_table_list;
  Item *m_condition;
};

/**
  Takes in two or more iterators and outputs rows from them sequentially
  (first all rows from the first one, then all from the second one, etc.).
  Used for implementing UNION ALL, typically together with StreamingIterator.
 */
class AppendIterator final : public RowIterator {
 public:
  AppendIterator(
      THD *thd,
      std::vector<unique_ptr_destroy_only<RowIterator>> &&sub_iterators);

  bool Init() override;
  int Read() override;

  void StartPSIBatchMode() override;
  void EndPSIBatchModeIfStarted() override;

  void SetNullRowFlag(bool is_null_row) override;
  void UnlockRow() override;

 private:
  std::vector<unique_ptr_destroy_only<RowIterator>> m_sub_iterators;
  size_t m_current_iterator_index;
  bool m_pfs_batch_mode_enabled;
};
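
// Documentation-only sketch (added by the editor, not part of the original
// header): UNION ALL as plain concatenation, which is the core of what
// AppendIterator does (minus the PSI batch-mode bookkeeping). Invented names,
// standard library only, never compiled (#if 0).
#if 0
#include <vector>

std::vector<int> AppendAll(const std::vector<std::vector<int>> &inputs) {
  std::vector<int> out;
  for (const auto &input : inputs) {
    // First all rows from the first input, then all from the second, etc.
    out.insert(out.end(), input.begin(), input.end());
  }
  return out;
}
#endif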

#endif  // SQL_ITERATORS_COMPOSITE_ITERATORS_H_