MySQL 8.4.0
Source Code Documentation
path_helpers.h
/* Copyright (c) 2021, 2024, Oracle and/or its affiliates.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License, version 2.0,
   as published by the Free Software Foundation.

   This program is designed to work with certain software (including
   but not limited to OpenSSL) that is licensed under separate terms,
   as designated in a particular file or component or in included license
   documentation. The authors of MySQL hereby grant you an additional
   permission to link the program and your derivative works with the
   separately licensed software that they have either included with
   the program or referenced in the documentation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License, version 2.0, for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
23
24#ifndef SQL_RANGE_OPTIMIZER_PATH_HELPERS_H_
25#define SQL_RANGE_OPTIMIZER_PATH_HELPERS_H_
26
27/**
28 @file
29 Various small helpers to abstract over the fact that AccessPath can contain
30 a number of different range scan types.
31 */
32
40
inline bool is_loose_index_scan(const AccessPath *path) {
  return path->type == AccessPath::INDEX_SKIP_SCAN ||
         path->type == AccessPath::GROUP_INDEX_SKIP_SCAN;
}

inline bool is_agg_loose_index_scan(const AccessPath *path) {
  switch (path->type) {
    case AccessPath::INDEX_SKIP_SCAN:
      return path->index_skip_scan().param->has_aggregate_function;
    case AccessPath::GROUP_INDEX_SKIP_SCAN:
      return path->group_index_skip_scan().param->have_agg_distinct;
    default:
      return false;
  }
}

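// Illustrative usage sketch (added for exposition; "path" is a hypothetical
// AccessPath chosen by the range optimizer). The helpers above only inspect
// path->type and the per-type parameter structs:
//
//   if (is_loose_index_scan(path)) {
//     // INDEX_SKIP_SCAN or GROUP_INDEX_SKIP_SCAN, i.e. a loose index scan.
//   }
//   if (is_agg_loose_index_scan(path)) {
//     // The loose index scan also evaluates an aggregate itself
//     // (e.g. MIN()/MAX() or an aggregate over DISTINCT values).
//   }
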
/**
  Whether the range access method is capable of returning records
  in reverse order.
 */
inline bool reverse_sort_possible(const AccessPath *path) {
  return path->type == AccessPath::INDEX_RANGE_SCAN;
}

/**
  Whether the access path is an INDEX_RANGE_SCAN that returns rows in reverse
  order. (Note that non-range index scans return false here.)
 */
inline bool is_reverse_sorted_range(const AccessPath *path) {
  return path->type == AccessPath::INDEX_RANGE_SCAN &&
         path->index_range_scan().reverse;
}

/**
  Ask the AccessPath to reverse itself; returns false if successful.
  Only INDEX_RANGE_SCAN supports reversal; all other types report failure.
 */
inline bool make_reverse(uint used_key_parts, AccessPath *path) {
  if (path->type == AccessPath::INDEX_RANGE_SCAN) {
    if (path->index_range_scan().geometry) {
      // Reverse-ordered scans over R-tree (geometry) ranges are not supported.
      return true;
    }
    path->index_range_scan().reverse = true;
    TABLE *table = path->index_range_scan().used_key_part[0].field->table;
    path->index_range_scan().using_extended_key_parts =
        (used_key_parts > table->key_info[path->index_range_scan().index]
                              .user_defined_key_parts);
    return false;
  } else {
    return true;
  }
}

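// Illustrative sketch (added for exposition; "path" and "order_key_parts" are
// hypothetical): reversing a range scan so it can serve ORDER BY ... DESC
// without a filesort.
//
//   if (reverse_sort_possible(path) &&
//       !make_reverse(order_key_parts, path)) {
//     assert(is_reverse_sorted_range(path));
//   }
//
// make_reverse() reports failure (true) for geometry ranges and for every
// access path type other than INDEX_RANGE_SCAN.
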
inline void set_need_sorted_output(AccessPath *path) {
  switch (path->type) {
    case AccessPath::INDEX_RANGE_SCAN:
      path->index_range_scan().mrr_flags |= HA_MRR_SORTED;
      break;
    case AccessPath::INDEX_SKIP_SCAN:
    case AccessPath::GROUP_INDEX_SKIP_SCAN:
      // Always sorted already.
      break;
    default:
      assert(false);
  }
}

/**
  If this is an index range scan, and that range scan uses a single
  index, returns the index used. Otherwise, MAX_KEY.
 */
inline unsigned used_index(const AccessPath *path) {
  switch (path->type) {
    case AccessPath::INDEX_RANGE_SCAN:
      return path->index_range_scan().index;
    case AccessPath::INDEX_SKIP_SCAN:
      return path->index_skip_scan().index;
    case AccessPath::GROUP_INDEX_SKIP_SCAN:
      return path->group_index_skip_scan().index;
    default:
      return MAX_KEY;
  }
}

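// Illustrative sketch (added for exposition; "path" and "table" are
// hypothetical): MAX_KEY acts as the "no single index" sentinel.
//
//   const unsigned keynr = used_index(path);
//   if (keynr != MAX_KEY) {
//     KEY *key_info = table->key_info + keynr;  // Metadata of the used index.
//   }
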
/**
  Return true if there is only one range and this uses the whole unique key.
 */
inline bool unique_key_range(const AccessPath *path) {
  if (path->type != AccessPath::INDEX_RANGE_SCAN) {
    return false;
  }
  if (path->index_range_scan().num_ranges == 1) {
    QUICK_RANGE *tmp = path->index_range_scan().ranges[0];
    if ((tmp->flag & (EQ_RANGE | NULL_RANGE)) == EQ_RANGE) {
      KEY *key =
          path->index_range_scan().used_key_part[0].field->table->key_info +
          path->index_range_scan().index;
      return (key->flags & HA_NOSAME) && key->key_length == tmp->min_length;
    }
  }
  return false;
}

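// Illustrative example (added for exposition): with a unique index on (a, b)
// and the condition "a = 1 AND b = 2", the scan has exactly one EQ_RANGE whose
// length equals the full key length, so unique_key_range() returns true and
// the scan can deliver at most one row.
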
inline void get_fields_used(const AccessPath *path, MY_BITMAP *used_fields) {
  switch (path->type) {
    case AccessPath::INDEX_RANGE_SCAN:
      for (uint i = 0; i < path->index_range_scan().num_used_key_parts; ++i) {
        bitmap_set_bit(
            used_fields,
            path->index_range_scan().used_key_part[i].field->field_index());
      }
      break;
    case AccessPath::INDEX_MERGE:
      for (AccessPath *child : *path->index_merge().children) {
        get_fields_used(child, used_fields);
      }
      break;
    case AccessPath::ROWID_INTERSECTION:
      for (AccessPath *child : *path->rowid_intersection().children) {
        get_fields_used(child, used_fields);
      }
      if (path->rowid_intersection().cpk_child != nullptr) {
        get_fields_used(path->rowid_intersection().cpk_child, used_fields);
      }
      break;
    case AccessPath::ROWID_UNION:
      for (AccessPath *child : *path->rowid_union().children) {
        get_fields_used(child, used_fields);
      }
      break;
    case AccessPath::INDEX_SKIP_SCAN:
      for (uint i = 0; i < path->index_skip_scan().num_used_key_parts; ++i) {
        bitmap_set_bit(used_fields, path->index_skip_scan()
                                        .param->index_info->key_part[i]
                                        .field->field_index());
      }
      break;
    case AccessPath::GROUP_INDEX_SKIP_SCAN:
      for (uint i = 0; i < path->group_index_skip_scan().num_used_key_parts;
           ++i) {
        bitmap_set_bit(used_fields, path->group_index_skip_scan()
                                        .param->index_info->key_part[i]
                                        .field->field_index());
      }
      break;
    default:
      assert(false);
  }
}

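// Illustrative sketch (added for exposition; "used_fields" is a hypothetical
// MY_BITMAP already sized to one bit per column of the table):
//
//   get_fields_used(path, &used_fields);
//   // used_fields now has a bit set for every column that any part of the
//   // (possibly merged) range scan reads through an index.
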
inline unsigned get_used_key_parts(const AccessPath *path) {
  switch (path->type) {
    case AccessPath::INDEX_RANGE_SCAN:
      return path->index_range_scan().num_used_key_parts;
    case AccessPath::INDEX_SKIP_SCAN:
      return path->index_skip_scan().num_used_key_parts;
    case AccessPath::GROUP_INDEX_SKIP_SCAN:
      return path->group_index_skip_scan().num_used_key_parts;
    case AccessPath::INDEX_MERGE:
    case AccessPath::ROWID_INTERSECTION:
    case AccessPath::ROWID_UNION:
      return 0;
    default:
      assert(false);
      return 0;
  }
}

/**
  Return whether any index used by this range scan uses the field(s)
  marked in the passed bitmap. Assert-fails if not a range scan.
 */
inline bool uses_index_on_fields(const AccessPath *path,
                                 const MY_BITMAP *fields) {
  switch (path->type) {
    case AccessPath::INDEX_RANGE_SCAN:
      return is_key_used(path->index_range_scan().used_key_part[0].field->table,
                         path->index_range_scan().index, fields);
    case AccessPath::INDEX_MERGE:
      for (AccessPath *child : *path->index_merge().children) {
        if (uses_index_on_fields(child, fields)) {
          return true;
        }
      }
      return false;
    case AccessPath::ROWID_INTERSECTION:
      for (AccessPath *child : *path->rowid_intersection().children) {
        if (uses_index_on_fields(child, fields)) {
          return true;
        }
      }
      return path->rowid_intersection().cpk_child != nullptr &&
             uses_index_on_fields(path->rowid_intersection().cpk_child, fields);
    case AccessPath::ROWID_UNION:
      for (AccessPath *child : *path->rowid_union().children) {
        if (uses_index_on_fields(child, fields)) {
          return true;
        }
      }
      return false;
    case AccessPath::INDEX_SKIP_SCAN:
      return is_key_used(path->index_skip_scan().table,
                         path->index_skip_scan().index, fields);
    case AccessPath::GROUP_INDEX_SKIP_SCAN:
      return is_key_used(path->group_index_skip_scan().table,
                         path->group_index_skip_scan().index, fields);
    default:
      assert(false);
      return false;
  }
}

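// Illustrative sketch (added for exposition; "updated_columns" is a
// hypothetical bitmap of columns an UPDATE will modify): this is the kind of
// check used to detect that the statement changes a column of the very index
// the range scan is reading from.
//
//   if (uses_index_on_fields(path, &updated_columns)) {
//     // Updating these columns would disturb the scanned index.
//   }
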
/**
  Get the total length of first used_key_parts parts of the key,
  in bytes. Only applicable for range access types that use a single
  index (others will assert-fail).
 */
inline unsigned get_max_used_key_length(const AccessPath *path) {
  switch (path->type) {
    case AccessPath::INDEX_RANGE_SCAN: {
      int max_used_key_length = 0;
      Bounds_checked_array ranges{path->index_range_scan().ranges,
                                  path->index_range_scan().num_ranges};
      for (const QUICK_RANGE *range : ranges) {
        max_used_key_length =
            std::max<int>(max_used_key_length, range->min_length);
        max_used_key_length =
            std::max<int>(max_used_key_length, range->max_length);
      }
      return max_used_key_length;
    }
    case AccessPath::INDEX_SKIP_SCAN: {
      int max_used_key_length = 0;
      KEY_PART_INFO *p = path->index_skip_scan().param->index_info->key_part;
      for (uint i = 0; i < path->index_skip_scan().num_used_key_parts;
           i++, p++) {
        max_used_key_length += p->store_length;
      }
      return max_used_key_length;
    }
    case AccessPath::GROUP_INDEX_SKIP_SCAN:
      return path->group_index_skip_scan().param->max_used_key_length;
    default:
      assert(false);
      return 0;
  }
}

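// Worked example (added for exposition): for an INDEX_RANGE_SCAN with two
// ranges whose bound lengths are {min = 4, max = 4} and {min = 4, max = 8}
// bytes, the result is max(4, 4, 4, 8) = 8 bytes, i.e. the longest key prefix
// that any of the ranges actually compares against.
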
/*
  Append a text representation of the range scan (what is merged, and how)
  to str. The result is added to the "Extra" field in EXPLAIN output.
 */
inline void add_info_string(const AccessPath *path, String *str) {
  switch (path->type) {
    case AccessPath::INDEX_RANGE_SCAN: {
      TABLE *table = path->index_range_scan().used_key_part[0].field->table;
      KEY *key_info = table->key_info + path->index_range_scan().index;
      str->append(key_info->name);
      break;
    }
    case AccessPath::INDEX_MERGE: {
      bool first = true;
      TABLE *table = path->index_merge().table;
      str->append(STRING_WITH_LEN("sort_union("));

      // For EXPLAIN compatibility with older versions, PRIMARY is always
      // printed last.
      for (bool print_primary : {false, true}) {
        for (AccessPath *child : *path->index_merge().children) {
          const bool is_primary = table->file->primary_key_is_clustered() &&
                                  used_index(child) == table->s->primary_key;
          if (is_primary != print_primary) continue;
          if (!first)
            str->append(',');
          else
            first = false;
          ::add_info_string(child, str);
        }
      }
      str->append(')');
      break;
    }
    case AccessPath::ROWID_INTERSECTION: {
      bool first = true;
      str->append(STRING_WITH_LEN("intersect("));
      for (AccessPath *current : *path->rowid_intersection().children) {
        if (!first)
          str->append(',');
        else
          first = false;
        ::add_info_string(current, str);
      }
      if (path->rowid_intersection().cpk_child) {
        str->append(',');
        ::add_info_string(path->rowid_intersection().cpk_child, str);
      }
      str->append(')');
      break;
    }
    case AccessPath::ROWID_UNION: {
      bool first = true;
      str->append(STRING_WITH_LEN("union("));
      for (AccessPath *current : *path->rowid_union().children) {
        if (!first)
          str->append(',');
        else
          first = false;
        ::add_info_string(current, str);
      }
      str->append(')');
      break;
    }
    case AccessPath::INDEX_SKIP_SCAN: {
      str->append(STRING_WITH_LEN("index_for_skip_scan("));
      str->append(path->index_skip_scan().param->index_info->name);
      str->append(')');
      break;
    }
    case AccessPath::GROUP_INDEX_SKIP_SCAN: {
      str->append(STRING_WITH_LEN("index_for_group_by("));
      str->append(path->group_index_skip_scan().param->index_info->name);
      str->append(')');
      break;
    }
    default:
      assert(false);
  }
}

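// Illustrative output (added for exposition; index names are hypothetical):
// for an index merge over idx_a and idx_b plus a clustered PRIMARY key range,
// the text appended for the EXPLAIN "Extra" column looks like
//
//   sort_union(idx_a,idx_b,PRIMARY)
//
// with PRIMARY printed last for compatibility with older EXPLAIN output, as
// noted in the INDEX_MERGE branch above.
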
/*
  Append a comma-separated list of the keys this quick select uses to
  key_names; append a comma-separated list of the corresponding used lengths
  to used_lengths. This is used by select_describe.

  path must be a range scan, or there will be an assert.
 */
inline void add_keys_and_lengths(const AccessPath *path, String *key_names,
                                 String *used_lengths) {
  switch (path->type) {
    case AccessPath::INDEX_RANGE_SCAN: {
      TABLE *table = path->index_range_scan().used_key_part[0].field->table;
      KEY *key_info = table->key_info + path->index_range_scan().index;
      key_names->append(key_info->name);

      char buf[64];
      size_t length =
          longlong10_to_str(get_max_used_key_length(path), buf, 10) - buf;
      used_lengths->append(buf, length);
      break;
    }
    case AccessPath::INDEX_MERGE:
      add_keys_and_lengths_index_merge(path, key_names, used_lengths);
      break;
    case AccessPath::ROWID_INTERSECTION:
      add_keys_and_lengths_rowid_intersection(path, key_names, used_lengths);
      break;
    case AccessPath::ROWID_UNION:
      add_keys_and_lengths_rowid_union(path, key_names, used_lengths);
      break;
    case AccessPath::INDEX_SKIP_SCAN: {
      key_names->append(path->index_skip_scan().param->index_info->name);

      char buf[64];
      uint length =
          longlong10_to_str(get_max_used_key_length(path), buf, 10) - buf;
      used_lengths->append(buf, length);

      break;
    }
    case AccessPath::GROUP_INDEX_SKIP_SCAN: {
      key_names->append(path->group_index_skip_scan().param->index_info->name);

      char buf[64];
      uint length =
          longlong10_to_str(get_max_used_key_length(path), buf, 10) - buf;
      used_lengths->append(buf, length);

      break;
    }
    default:
      assert(false);
  }
}

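// Illustrative output (added for exposition; "idx_a" is hypothetical): for a
// plain range scan on idx_a that compares at most the first 8 bytes of the
// key, key_names receives "idx_a" and used_lengths receives "8"; these surface
// as the "key" and "key_len" columns of EXPLAIN.
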
/**
  Add basic info for this range scan to the optimizer trace.

  path must be a range scan, or there will be an assert.

  @param thd          Thread handle
  @param path         The access path to describe
  @param param        Parameters for range analysis of this table
  @param trace_object The optimizer trace object the info is appended to
 */
inline void trace_basic_info(THD *thd, const AccessPath *path,
                             const RANGE_OPT_PARAM *param,
                             Opt_trace_object *trace_object) {
  switch (path->type) {
    case AccessPath::INDEX_RANGE_SCAN:
      trace_basic_info_index_range_scan(thd, path, param, trace_object);
      break;
    case AccessPath::INDEX_MERGE:
      trace_basic_info_index_merge(thd, path, param, trace_object);
      break;
    case AccessPath::ROWID_INTERSECTION:
      trace_basic_info_rowid_intersection(thd, path, param, trace_object);
      break;
    case AccessPath::ROWID_UNION:
      trace_basic_info_rowid_union(thd, path, param, trace_object);
      break;
    case AccessPath::INDEX_SKIP_SCAN:
      trace_basic_info_index_skip_scan(thd, path, param, trace_object);
      break;
    case AccessPath::GROUP_INDEX_SKIP_SCAN:
      trace_basic_info_group_index_skip_scan(thd, path, param, trace_object);
      break;
    default:
      assert(false);
  }
}

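// Illustrative sketch (added for exposition): the helper only dispatches on
// path->type, so a caller provides its own trace object, e.g.
//
//   Opt_trace_object wrapper(&thd->opt_trace);
//   trace_basic_info(thd, path, param, &wrapper);
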
inline bool get_forced_by_hint(const AccessPath *path) {
  switch (path->type) {
    case AccessPath::INDEX_RANGE_SCAN:
      return false;  // There is no hint for plain range scan.
    case AccessPath::INDEX_MERGE:
      return path->index_merge().forced_by_hint;
    case AccessPath::ROWID_INTERSECTION:
      return path->rowid_intersection().forced_by_hint;
    case AccessPath::ROWID_UNION:
      return path->rowid_union().forced_by_hint;
    case AccessPath::INDEX_SKIP_SCAN:
      return path->index_skip_scan().forced_by_hint;
    case AccessPath::GROUP_INDEX_SKIP_SCAN:
      return path->group_index_skip_scan().forced_by_hint;
    default:
      assert(false);
      return false;
  }
}

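// Context note (added for exposition): forced_by_hint is set when an optimizer
// hint such as /*+ INDEX_MERGE(t idx_a, idx_b) */ or /*+ SKIP_SCAN(t idx_a) */
// selected the plan; a plain range scan has no such hint, hence the
// unconditional false above.
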
#ifndef NDEBUG
/*
  Print quick select information to DBUG_FILE. Caller is responsible
  for locking DBUG_FILE before this call and unlocking it afterwards.
 */
inline void dbug_dump(const AccessPath *path, int indent, bool verbose) {
  switch (path->type) {
    case AccessPath::INDEX_RANGE_SCAN: {
      const auto &param = path->index_range_scan();
      dbug_dump_range(indent, verbose, param.used_key_part[0].field->table,
                      param.index, param.used_key_part,
                      {param.ranges, param.num_ranges});
      break;
    }
    case AccessPath::INDEX_MERGE: {
      dbug_dump_index_merge(indent, verbose, *path->index_merge().children);
      break;
    }
    case AccessPath::ROWID_INTERSECTION:
      dbug_dump_rowid_intersection(indent, verbose,
                                   *path->rowid_intersection().children);
      break;
    case AccessPath::ROWID_UNION:
      dbug_dump_rowid_union(indent, verbose, *path->rowid_union().children);
      break;
    case AccessPath::INDEX_SKIP_SCAN:
      dbug_dump_index_skip_scan(indent, verbose, path);
      break;
    case AccessPath::GROUP_INDEX_SKIP_SCAN:
      dbug_dump_group_index_skip_scan(indent, verbose, path);
      break;
    default:
      assert(false);
  }
}
#endif

#endif  // SQL_RANGE_OPTIMIZER_PATH_HELPERS_H_