MySQL 9.0.0
Source Code Documentation
path_helpers.h
Go to the documentation of this file.
1/* Copyright (c) 2021, 2024, Oracle and/or its affiliates.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License, version 2.0,
5 as published by the Free Software Foundation.
6
7 This program is designed to work with certain software (including
8 but not limited to OpenSSL) that is licensed under separate terms,
9 as designated in a particular file or component or in included license
10 documentation. The authors of MySQL hereby grant you an additional
11 permission to link the program and your derivative works with the
12 separately licensed software that they have either included with
13 the program or referenced in the documentation.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License, version 2.0, for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program; if not, write to the Free Software
22 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
23
24#ifndef SQL_RANGE_OPTIMIZER_PATH_HELPERS_H_
25#define SQL_RANGE_OPTIMIZER_PATH_HELPERS_H_
26
27/**
28 @file
29 Various small helpers to abstract over the fact that AccessPath can contain
30 a number of different range scan types.
31 */
32
#include <assert.h>

#include <algorithm>

#include "sql/handler.h"
#include "sql/join_optimizer/access_path.h"
#include "sql/range_optimizer/group_index_skip_scan_plan.h"
#include "sql/range_optimizer/index_merge_plan.h"
#include "sql/range_optimizer/index_range_scan_plan.h"
#include "sql/range_optimizer/index_skip_scan_plan.h"
#include "sql/range_optimizer/range_optimizer.h"
#include "sql/range_optimizer/rowid_ordered_retrieval_plan.h"
42inline bool is_loose_index_scan(const AccessPath *path) {
43 return path->type == AccessPath::INDEX_SKIP_SCAN ||
45}
46
48 switch (path->type) {
50 return path->index_skip_scan().param->has_aggregate_function;
51 break;
53 return path->group_index_skip_scan().param->have_agg_distinct;
54 break;
55 default:
56 return false;
57 }
58}
59
60/**
61 Whether the range access method is capable of returning records
62 in reverse order.
63 */
65 return path->type == AccessPath::INDEX_RANGE_SCAN;
66}
67
68/**
69 Whether the access path is an INDEX_RANGE_SCAN that returns rows in reverse
70 order. (Note that non-range index scans return false here.)
71 */
73 return path->type == AccessPath::INDEX_RANGE_SCAN &&
74 path->index_range_scan().reverse;
75}
76
77/**
78 Ask the AccessPath to reverse itself; returns false if successful.
79 Overridden only in INDEX_RANGE_SCAN.
80 */
81inline bool make_reverse(uint used_key_parts, AccessPath *path) {
83 if (path->index_range_scan().geometry) {
84 return true;
85 }
86 path->index_range_scan().reverse = true;
87 TABLE *table = path->index_range_scan().used_key_part[0].field->table;
88 path->index_range_scan().using_extended_key_parts =
89 (used_key_parts > table->key_info[path->index_range_scan().index]
90 .user_defined_key_parts);
91 return false;
92 } else {
93 return true;
94 }
95}
96
98 switch (path->type) {
100 path->index_range_scan().mrr_flags |= HA_MRR_SORTED;
101 break;
104 // Always sorted already.
105 break;
106 default:
107 assert(false);
108 }
109}
110
111/**
112 If this is an index range scan, and that range scan uses a single
113 index, returns the index used. Otherwise, MAX_KEY.
114 */
115inline unsigned used_index(const AccessPath *path) {
116 switch (path->type) {
118 return path->index_range_scan().index;
120 return path->index_skip_scan().index;
122 return path->group_index_skip_scan().index;
123 default:
124 return MAX_KEY;
125 }
126}
127
128/**
129 Return true if there is only one range and this uses the whole unique key.
130 */
131inline bool unique_key_range(const AccessPath *path) {
132 if (path->type != AccessPath::INDEX_RANGE_SCAN) {
133 return false;
134 }
135 if (path->index_range_scan().num_ranges == 1) {
136 QUICK_RANGE *tmp = path->index_range_scan().ranges[0];
137 if ((tmp->flag & (EQ_RANGE | NULL_RANGE)) == EQ_RANGE) {
138 KEY *key =
139 path->index_range_scan().used_key_part[0].field->table->key_info +
140 path->index_range_scan().index;
141 return (key->flags & HA_NOSAME) && key->key_length == tmp->min_length;
142 }
143 }
144 return false;
145}
146
147inline void get_fields_used(const AccessPath *path, MY_BITMAP *used_fields) {
148 switch (path->type) {
150 for (uint i = 0; i < path->index_range_scan().num_used_key_parts; ++i) {
152 used_fields,
153 path->index_range_scan().used_key_part[i].field->field_index());
154 }
155 break;
157 for (AccessPath *child : *path->index_merge().children) {
158 get_fields_used(child, used_fields);
159 }
160 break;
162 for (AccessPath *child : *path->rowid_intersection().children) {
163 get_fields_used(child, used_fields);
164 }
165 if (path->rowid_intersection().cpk_child != nullptr) {
166 get_fields_used(path->rowid_intersection().cpk_child, used_fields);
167 }
168 break;
170 for (AccessPath *child : *path->rowid_union().children) {
171 get_fields_used(child, used_fields);
172 }
173 break;
175 for (uint i = 0; i < path->index_skip_scan().num_used_key_parts; ++i) {
176 bitmap_set_bit(used_fields, path->index_skip_scan()
177 .param->index_info->key_part[i]
178 .field->field_index());
179 }
180 break;
182 for (uint i = 0; i < path->group_index_skip_scan().num_used_key_parts;
183 ++i) {
184 bitmap_set_bit(used_fields, path->group_index_skip_scan()
185 .param->index_info->key_part[i]
186 .field->field_index());
187 }
188 break;
189 default:
190 assert(false);
191 }
192}
193
194inline unsigned get_used_key_parts(const AccessPath *path) {
195 switch (path->type) {
197 return path->index_range_scan().num_used_key_parts;
199 return path->index_skip_scan().num_used_key_parts;
201 return path->group_index_skip_scan().num_used_key_parts;
202 case AccessPath::REF:
203 return path->ref().ref->key_parts;
205 return path->ref_or_null().ref->key_parts;
207 return path->eq_ref().ref->key_parts;
209 return path->pushed_join_ref().ref->key_parts;
211 return path->full_text_search().ref->key_parts;
212 case AccessPath::MRR:
213 return path->mrr().ref->key_parts;
219 return 0;
220 default:
221 assert(false);
222 return 0;
223 }
224}
225
226/**
227 Return whether any index used by this range scan uses the field(s)
228 marked in passed bitmap. Assert-fails if not a range scan.
229 */
231 const MY_BITMAP *fields) {
232 switch (path->type) {
234 return is_key_used(path->index_range_scan().used_key_part[0].field->table,
235 path->index_range_scan().index, fields);
237 for (AccessPath *child : *path->index_merge().children) {
238 if (uses_index_on_fields(child, fields)) {
239 return true;
240 }
241 }
242 return false;
244 for (AccessPath *child : *path->rowid_intersection().children) {
245 if (uses_index_on_fields(child, fields)) {
246 return true;
247 }
248 }
249 return path->rowid_intersection().cpk_child != nullptr &&
250 uses_index_on_fields(path->rowid_intersection().cpk_child, fields);
252 for (AccessPath *child : *path->rowid_union().children) {
253 if (uses_index_on_fields(child, fields)) {
254 return true;
255 }
256 }
257 return false;
259 return is_key_used(path->index_skip_scan().table,
260 path->index_skip_scan().index, fields);
262 return is_key_used(path->group_index_skip_scan().table,
263 path->group_index_skip_scan().index, fields);
264 default:
265 assert(false);
266 return false;
267 }
268}
269
270/**
271 Get the total length of first used_key_parts parts of the key,
272 in bytes. Only applicable for range access types that use a single
273 index (others will assert-fail).
274 */
275inline unsigned get_max_used_key_length(const AccessPath *path) {
276 switch (path->type) {
278 int max_used_key_length = 0;
279 Bounds_checked_array ranges{path->index_range_scan().ranges,
280 path->index_range_scan().num_ranges};
281 for (const QUICK_RANGE *range : ranges) {
282 max_used_key_length =
283 std::max<int>(max_used_key_length, range->min_length);
284 max_used_key_length =
285 std::max<int>(max_used_key_length, range->max_length);
286 }
287 return max_used_key_length;
288 }
290 int max_used_key_length = 0;
291 KEY_PART_INFO *p = path->index_skip_scan().param->index_info->key_part;
292 for (uint i = 0; i < path->index_skip_scan().num_used_key_parts;
293 i++, p++) {
294 max_used_key_length += p->store_length;
295 }
296 return max_used_key_length;
297 }
299 return path->group_index_skip_scan().param->max_used_key_length;
300 default:
301 assert(false);
302 return 0;
303 }
304}
305
306/*
307 Append text representation of the range scan (what and how is
308 merged) to str. The result is added to "Extra" field in EXPLAIN output.
309 */
310inline void add_info_string(const AccessPath *path, String *str) {
311 switch (path->type) {
313 TABLE *table = path->index_range_scan().used_key_part[0].field->table;
314 KEY *key_info = table->key_info + path->index_range_scan().index;
315 str->append(key_info->name);
316 break;
317 }
319 bool first = true;
320 TABLE *table = path->index_merge().table;
321 str->append(STRING_WITH_LEN("sort_union("));
322
323 // For EXPLAIN compatibility with older versions, PRIMARY is always
324 // printed last.
325 for (bool print_primary : {false, true}) {
326 for (AccessPath *child : *path->index_merge().children) {
327 const bool is_primary = table->file->primary_key_is_clustered() &&
328 used_index(child) == table->s->primary_key;
329 if (is_primary != print_primary) continue;
330 if (!first)
331 str->append(',');
332 else
333 first = false;
334 ::add_info_string(child, str);
335 }
336 }
337 str->append(')');
338 break;
339 }
341 bool first = true;
342 str->append(STRING_WITH_LEN("intersect("));
343 for (AccessPath *current : *path->rowid_intersection().children) {
344 if (!first)
345 str->append(',');
346 else
347 first = false;
348 ::add_info_string(current, str);
349 }
350 if (path->rowid_intersection().cpk_child) {
351 str->append(',');
352 ::add_info_string(path->rowid_intersection().cpk_child, str);
353 }
354 str->append(')');
355 break;
356 }
358 bool first = true;
359 str->append(STRING_WITH_LEN("union("));
360 for (AccessPath *current : *path->rowid_union().children) {
361 if (!first)
362 str->append(',');
363 else
364 first = false;
365 ::add_info_string(current, str);
366 }
367 str->append(')');
368 break;
369 }
371 str->append(STRING_WITH_LEN("index_for_skip_scan("));
372 str->append(path->index_skip_scan().param->index_info->name);
373 str->append(')');
374 break;
375 }
377 str->append(STRING_WITH_LEN("index_for_group_by("));
378 str->append(path->group_index_skip_scan().param->index_info->name);
379 str->append(')');
380 break;
381 }
382 default:
383 assert(false);
384 }
385}
386
387/*
388 Append comma-separated list of keys this quick select uses to key_names;
389 append comma-separated list of corresponding used lengths to used_lengths.
390 This is used by select_describe.
391
392 path must be a range scan, or there will be an assert.
393 */
394inline void add_keys_and_lengths(const AccessPath *path, String *key_names,
395 String *used_lengths) {
396 switch (path->type) {
398 TABLE *table = path->index_range_scan().used_key_part[0].field->table;
399 KEY *key_info = table->key_info + path->index_range_scan().index;
400 key_names->append(key_info->name);
401
402 char buf[64];
403 size_t length =
405 used_lengths->append(buf, length);
406 break;
407 }
409 add_keys_and_lengths_index_merge(path, key_names, used_lengths);
410 break;
412 add_keys_and_lengths_rowid_intersection(path, key_names, used_lengths);
413 break;
415 add_keys_and_lengths_rowid_union(path, key_names, used_lengths);
416 break;
418 key_names->append(path->index_skip_scan().param->index_info->name);
419
420 char buf[64];
421 uint length =
423 used_lengths->append(buf, length);
424
425 break;
426 }
428 key_names->append(path->group_index_skip_scan().param->index_info->name);
429
430 char buf[64];
431 uint length =
433 used_lengths->append(buf, length);
434
435 break;
436 }
437 default:
438 assert(false);
439 }
440}
441
442/**
443 Add basic info for this range scan to the optimizer trace.
444
445 path must be a range scan, or there will be an assert.
446
447 @param thd Thread handle
448 @param param Parameters for range analysis of this table
449 @param trace_object The optimizer trace object the info is appended to
450 */
451inline void trace_basic_info(THD *thd, const AccessPath *path,
452 const RANGE_OPT_PARAM *param,
453 Opt_trace_object *trace_object) {
454 switch (path->type) {
456 trace_basic_info_index_range_scan(thd, path, param, trace_object);
457 break;
459 trace_basic_info_index_merge(thd, path, param, trace_object);
460 break;
462 trace_basic_info_rowid_intersection(thd, path, param, trace_object);
463 break;
465 trace_basic_info_rowid_union(thd, path, param, trace_object);
466 break;
468 trace_basic_info_index_skip_scan(thd, path, param, trace_object);
469 break;
471 trace_basic_info_group_index_skip_scan(thd, path, param, trace_object);
472 break;
473 default:
474 assert(false);
475 }
476}
477
478inline bool get_forced_by_hint(const AccessPath *path) {
479 switch (path->type) {
481 return false; // There is no hint for plain range scan.
483 return path->index_merge().forced_by_hint;
485 return path->rowid_intersection().forced_by_hint;
487 return path->rowid_union().forced_by_hint;
489 return path->index_skip_scan().forced_by_hint;
491 return path->group_index_skip_scan().forced_by_hint;
492 default:
493 assert(false);
494 return false;
495 }
496}
497
498#ifndef NDEBUG
499/*
500 Print quick select information to DBUG_FILE. Caller is responsible
501 for locking DBUG_FILE before this call and unlocking it afterwards.
502 */
503inline void dbug_dump(const AccessPath *path, int indent, bool verbose) {
504 switch (path->type) {
506 const auto &param = path->index_range_scan();
507 dbug_dump_range(indent, verbose, param.used_key_part[0].field->table,
508 param.index, param.used_key_part,
509 {param.ranges, param.num_ranges});
510 break;
511 }
513 dbug_dump_index_merge(indent, verbose, *path->index_merge().children);
514 break;
515 }
518 *path->rowid_intersection().children);
519 break;
521 dbug_dump_rowid_union(indent, verbose, *path->rowid_union().children);
522 break;
525 break;
528 break;
529 default:
530 assert(false);
531 }
532}
533#endif
534
535#endif // SQL_RANGE_OPTIMIZER_PATH_HELPERS_H_
A wrapper class which provides array bounds checking.
Definition: sql_array.h:47
Definition: key.h:57
Definition: key.h:113
const char * name
Name of key.
Definition: key.h:153
A JSON object (unordered set of key/value pairs).
Definition: opt_trace.h:802
Definition: range_optimizer.h:69
uint16 flag
Stores bitwise-or'ed bits defined in enum key_range_flags.
Definition: range_optimizer.h:75
uint16 min_length
Definition: range_optimizer.h:72
Definition: range_opt_param.h:29
Using this class is fraught with peril, and you need to be very careful when doing so.
Definition: sql_string.h:167
bool append(const String &s)
Definition: sql_string.cc:419
For each client connection we create a separate thread with THD serving as a thread/connection descri...
Definition: sql_lexer_thd.h:36
static char buf[MAX_BUF]
Definition: conf_to_src.cc:73
const char * p
Definition: ctype-mb.cc:1225
void trace_basic_info_group_index_skip_scan(THD *thd, const AccessPath *path, const RANGE_OPT_PARAM *, Opt_trace_object *trace_object)
Definition: group_index_skip_scan_plan.cc:79
void dbug_dump_group_index_skip_scan(int indent, bool, const AccessPath *path)
Definition: group_index_skip_scan_plan.cc:1837
void trace_basic_info_index_merge(THD *thd, const AccessPath *path, const RANGE_OPT_PARAM *param, Opt_trace_object *trace_object)
Definition: index_merge_plan.cc:34
void add_keys_and_lengths_index_merge(const AccessPath *path, String *key_names, String *used_lengths)
Definition: index_merge_plan.cc:46
void dbug_dump_index_merge(int indent, bool verbose, const Mem_root_array< AccessPath * > &children)
Definition: index_merge_plan.cc:71
void trace_basic_info_index_range_scan(THD *thd, const AccessPath *path, const RANGE_OPT_PARAM *param, Opt_trace_object *trace_object)
Definition: index_range_scan_plan.cc:794
void dbug_dump_range(int indent, bool verbose, TABLE *table, int index, KEY_PART *used_key_part, Bounds_checked_array< QUICK_RANGE * > ranges)
Definition: index_range_scan_plan.cc:1312
void dbug_dump_index_skip_scan(int indent, bool verbose, const AccessPath *path)
Definition: index_skip_scan_plan.cc:804
void trace_basic_info_index_skip_scan(THD *thd, const AccessPath *path, const RANGE_OPT_PARAM *, Opt_trace_object *trace_object)
Definition: index_skip_scan_plan.cc:71
MYSQL_STRINGS_EXPORT char * longlong10_to_str(int64_t val, char *dst, int radix)
Converts a 64-bit integer to its string representation in decimal notation.
Definition: int2str.cc:98
bool is_key_used(TABLE *table, uint idx, const MY_BITMAP *fields)
Definition: key.cc:411
@ EQ_RANGE
Definition: my_base.h:1097
@ NULL_RANGE
Definition: my_base.h:1102
#define HA_NOSAME
Do not allow duplicate records.
Definition: my_base.h:475
static void bitmap_set_bit(MY_BITMAP *map, uint bit)
Definition: my_bitmap.h:80
static uint verbose
Definition: mysqlcheck.cc:66
static char * path
Definition: mysqldump.cc:149
std::string str(const mysqlrouter::ConfigGenerator::Options::Endpoint &ep)
Definition: config_generator.cc:1081
static PFS_engine_table_share_proxy table
Definition: pfs.cc:61
Definition: buf0block_hint.cc:30
bool length(const dd::Spatial_reference_system *srs, const Geometry *g1, double *length, bool *null) noexcept
Computes the length of linestrings and multilinestrings.
Definition: length.cc:76
void add_info_string(const AccessPath *path, String *str)
Definition: path_helpers.h:310
bool unique_key_range(const AccessPath *path)
Return true if there is only one range and this uses the whole unique key.
Definition: path_helpers.h:131
bool is_loose_index_scan(const AccessPath *path)
Definition: path_helpers.h:42
void add_keys_and_lengths(const AccessPath *path, String *key_names, String *used_lengths)
Definition: path_helpers.h:394
unsigned used_index(const AccessPath *path)
If this is an index range scan, and that range scan uses a single index, returns the index used.
Definition: path_helpers.h:115
bool get_forced_by_hint(const AccessPath *path)
Definition: path_helpers.h:478
unsigned get_max_used_key_length(const AccessPath *path)
Get the total length of first used_key_parts parts of the key, in bytes.
Definition: path_helpers.h:275
bool uses_index_on_fields(const AccessPath *path, const MY_BITMAP *fields)
Return whether any index used by this range scan uses the field(s) marked in passed bitmap.
Definition: path_helpers.h:230
void trace_basic_info(THD *thd, const AccessPath *path, const RANGE_OPT_PARAM *param, Opt_trace_object *trace_object)
Add basic info for this range scan to the optimizer trace.
Definition: path_helpers.h:451
bool is_agg_loose_index_scan(const AccessPath *path)
Definition: path_helpers.h:47
void set_need_sorted_output(AccessPath *path)
Definition: path_helpers.h:97
unsigned get_used_key_parts(const AccessPath *path)
Definition: path_helpers.h:194
bool make_reverse(uint used_key_parts, AccessPath *path)
Ask the AccessPath to reverse itself; returns false if successful.
Definition: path_helpers.h:81
bool is_reverse_sorted_range(const AccessPath *path)
Whether the access path is an INDEX_RANGE_SCAN that returns rows in reverse order.
Definition: path_helpers.h:72
void get_fields_used(const AccessPath *path, MY_BITMAP *used_fields)
Definition: path_helpers.h:147
void dbug_dump(const AccessPath *path, int indent, bool verbose)
Definition: path_helpers.h:503
bool reverse_sort_possible(const AccessPath *path)
Whether the range access method is capable of returning records in reverse order.
Definition: path_helpers.h:64
required string key
Definition: replication_asynchronous_connection_failover.proto:60
void add_keys_and_lengths_rowid_union(const AccessPath *path, String *key_names, String *used_lengths)
Definition: rowid_ordered_retrieval_plan.cc:947
void add_keys_and_lengths_rowid_intersection(const AccessPath *path, String *key_names, String *used_lengths)
Definition: rowid_ordered_retrieval_plan.cc:912
void dbug_dump_rowid_union(int indent, bool verbose, const Mem_root_array< AccessPath * > &children)
Definition: rowid_ordered_retrieval_plan.cc:972
void trace_basic_info_rowid_intersection(THD *thd, const AccessPath *path, const RANGE_OPT_PARAM *param, Opt_trace_object *trace_object)
Definition: rowid_ordered_retrieval_plan.cc:112
void trace_basic_info_rowid_union(THD *thd, const AccessPath *path, const RANGE_OPT_PARAM *param, Opt_trace_object *trace_object)
Definition: rowid_ordered_retrieval_plan.cc:130
void dbug_dump_rowid_intersection(int indent, bool verbose, const Mem_root_array< AccessPath * > &children)
Definition: rowid_ordered_retrieval_plan.cc:962
#define HA_MRR_SORTED
Definition: handler.h:4002
constexpr const unsigned int MAX_KEY
Definition: sql_const.h:46
Common types of the Optimizer, used by optimization and execution.
#define STRING_WITH_LEN(X)
Definition: string_with_len.h:29
Access paths are a query planning structure that correspond 1:1 to iterators, in that an access path ...
Definition: access_path.h:213
@ PUSHED_JOIN_REF
Definition: access_path.h:240
@ GROUP_INDEX_SKIP_SCAN
Definition: access_path.h:250
@ INDEX_RANGE_SCAN
Definition: access_path.h:245
@ EQ_REF
Definition: access_path.h:239
@ REF_OR_NULL
Definition: access_path.h:238
@ ROWID_UNION
Definition: access_path.h:248
@ INDEX_SKIP_SCAN
Definition: access_path.h:249
@ MRR
Definition: access_path.h:243
@ INDEX_SCAN
Definition: access_path.h:235
@ REF
Definition: access_path.h:237
@ INDEX_MERGE
Definition: access_path.h:246
@ FULL_TEXT_SEARCH
Definition: access_path.h:241
@ ROWID_INTERSECTION
Definition: access_path.h:247
@ INDEX_DISTANCE_SCAN
Definition: access_path.h:236
Definition: my_bitmap.h:43
Definition: table.h:1407
Definition: gen_lex_token.cc:149